Example #1
class ShortLivedConnectionsRecipe(CommonHWSubConfigMixin, BaseEnrtRecipe):
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))

    perf_tests = Param(default=("TCP_RR", "TCP_CRR"))
    ip_versions = Param(default=("ipv4",))
    perf_parallel_streams = IntParam(default=2)
    perf_msg_sizes = ListParam(default=[1000, 5000, 7000, 10000, 12000])

    def test_wide_configuration(self):
        host1, host2 = self.matched.host1, self.matched.host2

        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [host1.eth0, host2.eth0]

        net_addr = "192.168.101"
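        # enumerate(..., 10) starts the counter at 10, so host1 gets
        # 192.168.101.10/24 and host2 gets 192.168.101.11/24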
        for i, host in enumerate([host1, host2], 10):
            host.eth0.down()
            host.eth0.ip_add(ipaddress(net_addr + "." + str(i) + "/24"))
            host.eth0.up()

        self.wait_tentative_ips(configuration.test_wide_devices)

        return configuration

    def generate_test_wide_description(self, config):
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(
                    dev.host.hostid, dev.name, dev.ips
                )
                for dev in config.test_wide_devices
            ])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        del config.test_wide_devices

        super().test_wide_deconfiguration(config)

    def generate_perf_endpoints(self, config):
        return [(self.matched.host1.eth0, self.matched.host2.eth0)]

    @property
    def mtu_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]
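
A minimal sketch of how such a recipe is typically driven, assuming LNST's
Controller API (the driver value here is hypothetical):

    from lnst.Controller import Controller

    ctl = Controller()
    recipe = ShortLivedConnectionsRecipe(driver="ixgbe")
    ctl.run(recipe)

Parameters declared with Param/IntParam/ListParam defaults, like the ones
above, can typically be overridden through such constructor keyword arguments.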
Example #2
class LinuxPerf(BaseTestModule):
    output_file = StrParam(mandatory=True)
    cpus = ListParam(type=IntParam())
    events = ListParam(type=StrParam())

    def run(self) -> bool:
        self._res_data = {}
        if not is_installed("perf"):
            self._res_data["msg"] = "perf is not installed on this machine!"
            logging.error(self._res_data["msg"])
            return False

        # can't use lnst.Common.ExecCmd.exec_cmd directly, because expected returncode is not zero
        cmd: str = self._compose_cmd()
        logging.debug(f"Executing: \"{cmd}\"")
        process = subprocess.Popen(cmd,
                                   shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   close_fds=True)

        self.wait_for_interrupt()

        stdout, stderr = process.communicate()

        if stdout:
            log_output(logging.debug, "Stdout", stdout.decode())
        if stderr:
            log_output(logging.debug, "Stderr", stderr.decode())

        self._res_data["filename"] = os.path.abspath(self.params.output_file)
        return process.returncode == -2

    def _compose_cmd(self) -> str:
        cmd: str = "perf record"
        cmd += f" --output={self.params.output_file}"

        if "cpus" in self.params:
            cmd += f" --cpu={','.join(map(str, self.params.cpus))}"

        if "events" in self.params:
            cmd += f" --event={','.join(self.params.events)}"

        return cmd
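
For illustration, with output_file="perf.data", cpus=[0, 1] and
events=["cycles", "instructions"] (hypothetical parameter values),
_compose_cmd() returns:

    perf record --output=perf.data --cpu=0,1 --event=cycles,instructions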
Example #3
class BaseEnrtRecipe(SctpFirewallPerfTestMixin, BaseSubConfigMixin,
                     PingTestAndEvaluate, PerfRecipe):
    """Base Recipe class for the ENRT recipe package

    This class defines the shared *test* method implementing the common test
    procedure in a very generic way. This common test procedure involves a
    single main *test_wide* configuration that is different for every specific
    scenario. After the main configuration there is usually a loop over several
    minor *sub* configuration types that can take different values to slightly
    change the tested use cases.

    Finally, for each combination of a **test_wide** + **sub** configuration we
    run several ping connection tests and several performance measurements.

    **test_wide** and **sub** configurations are implemented with **context
    manager** methods which ensure that deconfiguration is called even if an
    exception is raised (for example because of a bug in the recipe).

    Both **test_wide** and **sub** configurations are to be implemented in
    different classes; the BaseEnrtRecipe class only defines the common API and
    the base versions of the relevant methods.

    Test wide configuration is implemented via the following methods:

    * :any:`test_wide_configuration`
    * :any:`test_wide_deconfiguration`
    * :any:`generate_test_wide_description`

    Sub configurations are **mixed into** classes defining the specific
    scenario that is being tested. Various sub configurations are implemented as
    individual Python **Mixin** classes in the
    :any:`ConfigMixins<config_mixins>` package. These make use of Python's
    collaborative inheritance by calling the `super` function in a specific way.
    The "machinery" for that is defined in the :any:`BaseSubConfigMixin` class.
    It is then used in this class from the `test` method loop.

    :param driver:
        The driver parameter is used to modify the hw network requirements,
        specifically to request Devices using the specified driver. This is
        common enough in the Enrt recipes that it can be part of the Base class.

    :type driver: :any:`StrParam` (default "ixgbe")

    :param ip_versions:
        Parameter that determines which IP protocol versions will be tested.
    :type ip_versions: Tuple[Str] (default ("ipv4", "ipv6"))

    :param ping_parallel:
        Parameter used by the :any:`generate_ping_configurations` generator.
        Tells the generator method to create :any:`PingConf` objects that will
        be run in parallel.
    :type ping_parallel: :any:`BoolParam` (default False)

    :param ping_bidirect:
        Parameter used by the :any:`generate_ping_configurations` generator.
        Tells the generator method to create :any:`PingConf` objects for both
        directions between the ping endpoints.
    :type ping_bidirect: :any:`BoolParam` (default False)

    :param ping_count:
        Parameter used by the :any:`generate_ping_configurations` generator.
        Tells the generator how many pings should be sent for each ping test.
    :type ping_count: :any:`IntParam` (default 100)

    :param ping_interval:
        Parameter used by the :any:`generate_ping_configurations` generator.
        Tells the generator how fast should the pings be sent in each ping test.
    :type ping_interval: :any:`FloatParam` (default 0.2)

    :param ping_psize:
        Parameter used by the :any:`generate_ping_configurations` generator.
        Tells the generator how big should the pings packets be in each ping
        test.
    :type ping_psize: :any:`IntParam` (default 56)

    :param perf_tests:
        Parameter used by the :any:`generate_flow_combinations` generator.
        Tells the generator what types of network flow measurements to generate
        perf test configurations for.
    :type perf_tests: Tuple[str] (default ("tcp_stream", "udp_stream",
        "sctp_stream"))

    :param perf_tool_cpu:
        Parameter used by the :any:`generate_flow_combinations` generator. To
        indicate that the flow measurement should be pinned to a specific CPU
        core.
    :type perf_tool_cpu: :any:`IntParam` (optional parameter)

    :param perf_duration:
        Parameter used by the :any:`generate_perf_configurations` generator. To
        specify the duration of the performance measurements, in seconds.
    :type perf_duration: :any:`IntParam` (default 60)

    :param perf_iterations:
        Parameter used by the :any:`generate_perf_configurations` generator. To
        specify how many times should each performance measurement be repeated
        to generate cumulative results which can be statistically analyzed.
    :type perf_iterations: :any:`IntParam` (default 5)

    :param perf_parallel_streams:
        Parameter used by the :any:`generate_flow_combinations` generator. To
        specify how many parallel streams of the same network flow should be
        measured at the same time.
    :type perf_parallel_streams: :any:`IntParam` (default 1)

    :param perf_msg_sizes:
        Parameter used by the :any:`generate_flow_combinations` generator. To
        specify the different message sizes (in bytes) that should be tested
        for the network flow - each message size resulting in a separate
        performance measurement.
    :type perf_msg_sizes: List[Int] (default [123])

    :param net_perf_tool:
        Parameter used by the :any:`generate_perf_configurations` generator to
        create a PerfRecipeConf object.
        Specifies a network flow measurement class that accepts :any:`PerfFlow`
        objects and can be used to measure those specified flows.
    :type net_perf_tool: :any:`BaseFlowMeasurement` (default
        IperfFlowMeasurement)

    :param cpu_perf_tool:
        Parameter used by the :any:`generate_perf_configurations` generator to
        create a PerfRecipeConf object.
        Specifies a cpu measurement class that can be used to measure CPU
        utilization on specified hosts.
    :type cpu_perf_tool: :any:`BaseCPUMeasurement` (default StatCPUMeasurement)
    """

    driver = StrParam(default="ixgbe")

    #common test parameters
    ip_versions = Param(default=("ipv4", "ipv6"))

    #common ping test params
    ping_parallel = BoolParam(default=False)
    ping_bidirect = BoolParam(default=False)
    ping_count = IntParam(default=100)
    ping_interval = FloatParam(default=0.2)
    ping_psize = IntParam(default=56)

    #common perf test params
    perf_tests = Param(default=("tcp_stream", "udp_stream", "sctp_stream"))
    perf_tool_cpu = IntParam(mandatory=False)
    perf_duration = IntParam(default=60)
    perf_iterations = IntParam(default=5)
    perf_parallel_streams = IntParam(default=1)
    perf_msg_sizes = ListParam(default=[123])

    net_perf_tool = Param(default=IperfFlowMeasurement)
    cpu_perf_tool = Param(default=StatCPUMeasurement)

    def test(self):
        """Main test loop shared by all the Enrt recipes

        The test loop involves a single application of a **test_wide**
        configuration, then a loop over multiple **sub** configurations that
        involves:

        * creating the combined sub configuration of all available SubConfig
          Mixin classes via :any:`generate_sub_configurations`
        * applying the generated sub configuration via the :any:`_sub_context`
          context manager method
        * running tests
        * removing the current sub configuration via the :any:`_sub_context`
          context manager method
        """
        with self._test_wide_context() as main_config:
            for sub_config in self.generate_sub_configurations(main_config):
                with self._sub_context(sub_config) as recipe_config:
                    self.do_tests(recipe_config)

    @contextmanager
    def _test_wide_context(self):
        config = self.test_wide_configuration()
        self.describe_test_wide_configuration(config)
        try:
            yield config
        finally:
            self.test_wide_deconfiguration(config)

    def test_wide_configuration(self):
        """Creates an empty :any:`EnrtConfiguration` object

        This is again meant to be used with the collaborative inheritance
        design that may be useful for Enrt recipes. Derived classes will each
        individually add their own values to the instance created here. This way
        the complete test wide configuration is tracked in a single object.

        :return: returns a config object that tracks the applied configuration
            that can be used during testing to inspect the current state and
            make test decisions based on it.
        :rtype: :any:`EnrtConfiguration`

        Example::

            class Derived:
                def test_wide_configuration(self):
                    config = super().test_wide_configuration()

                    # ... configure something
                    config.something = what_was_configured

                    return config
        """
        return EnrtConfiguration()

    def test_wide_deconfiguration(self, config):
        """Base deconfiguration method.

        In the base class this should maybe only check if there's any leftover
        configuration and warn about it. In derived classes this can be
        overriden to take care of deconfiguring what was configured in the
        respective test_wide_configuration method.

        Example::

            class Derived:
                def test_wide_deconfiguration(self, config):
                    # ... deconfigure something
                    del config.something  # cleanup tracking

                    return super().test_wide_deconfiguration(config)
        """
        #TODO check if anything is still applied and throw exception?
        return

    def describe_test_wide_configuration(self, config):
        """Describes the current test wide configuration

        Creates a new result object that contains the description of the full
        test wide configuration applied by all the
        :any:`test_wide_configuration` methods in the class hierarchy.

        The description needs to be generated by the
        :any:`generate_test_wide_description` method. Additionally the
        description contains the state of all the parameters and their values
        passed to the recipe class instance during initialization.
        """
        description = self.generate_test_wide_description(config)
        self.add_result(
            True, "Summary of used Recipe parameters:\n{}".format(
                pprint.pformat(self.params._to_dict())))
        self.add_result(True, "\n".join(description))

    def generate_test_wide_description(self, config):
        """Generates the test wide configuration description

        Another method intended to be used with the collaborative version of
        the `super` method to cumulatively describe the full test wide
        configuration that was applied through multiple classes.

        The base class version of this method creates the initial list of
        strings containing just the header line. Each string added to this list
        will later be printed on its own line.

        :return: list of strings, each representing a single line
        :rtype: List[str]

        Example::

            class Derived:
                def generate_test_wide_description(self, config):
                    desc = super().generate_test_wide_description(config)
                    desc.append("Configured something: {}".format(config.something))
                    return desc
        """
        return [
            "Testwide configuration for recipe {} description:".format(
                self.__class__.__name__)
        ]

    @contextmanager
    def _sub_context(self, config):
        self.apply_sub_configuration(config)
        self.describe_sub_configuration(config)
        try:
            yield config
        finally:
            self.remove_sub_configuration(config)

    def describe_sub_configuration(self, config):
        description = self.generate_sub_configuration_description(config)
        self.add_result(True, "\n".join(description))

    def do_tests(self, recipe_config):
        """Entry point for actual tests

        The common scenario is to do ping and performance tests, however the
        method can be overridden to add more tests if needed.
        """
        self.do_ping_tests(recipe_config)
        self.do_perf_tests(recipe_config)

    def do_ping_tests(self, recipe_config):
        """Ping testing loop

        Loops over all various ping configurations generated by the
        :any:`generate_ping_configurations` method, then uses the PingRecipe
        methods to execute, report and evaluate the results.
        """
        for ping_configs in self.generate_ping_configurations(recipe_config):
            result = self.ping_test(ping_configs)
            self.ping_report_and_evaluate(result)

    def describe_perf_test_tweak(self, perf_config):
        description = self.generate_perf_test_tweak_description(perf_config)
        self.add_result(True, "\n".join(description))

    def do_perf_tests(self, recipe_config):
        """Performance testing loop

        Loops over all various perf configurations generated by the
        :any:`generate_perf_configurations` method, then uses the PerfRecipe
        methods to execute, report and evaluate the results.
        """
        for perf_config in self.generate_perf_configurations(recipe_config):
            self.apply_perf_test_tweak(perf_config)
            self.describe_perf_test_tweak(perf_config)
            try:
                result = self.perf_test(perf_config)
                self.perf_report_and_evaluate(result)
            finally:
                self.remove_perf_test_tweak(perf_config)

    def generate_ping_configurations(self, config):
        """Base ping test configuration generator

        The generator loops over all endpoint pairs to test ping between
        (generated by the :any:`generate_ping_endpoints` method) then over all
        the selected :any:`ip_versions` and finally over all the IP addresses
        that fit those criteria.

        :return: list of Ping configurations to test in parallel
        :rtype: List[:any:`PingConf`]
        """
        for endpoints in self.generate_ping_endpoints(config):
            for ipv in self.params.ip_versions:
                if ipv == "ipv6" and not endpoints.reachable:
                    continue

                ip_filter = {}
                if ipv == "ipv4":
                    ip_filter.update(family=AF_INET)
                elif ipv == "ipv6":
                    ip_filter.update(family=AF_INET6)
                    ip_filter.update(is_link_local=False)

                endpoint1, endpoint2 = endpoints.endpoints
                endpoint1_ips = endpoint1.ips_filter(**ip_filter)
                endpoint2_ips = endpoint2.ips_filter(**ip_filter)

                if len(endpoint1_ips) != len(endpoint2_ips):
                    raise LnstError(
                        "Source/destination ip lists are of different size.")

                ping_conf_list = []
                for src_addr, dst_addr in zip(endpoint1_ips, endpoint2_ips):
                    pconf = PingConf(
                        client=endpoint1.netns,
                        client_bind=src_addr,
                        destination=endpoint2.netns,
                        destination_address=dst_addr,
                        count=self.params.ping_count,
                        interval=self.params.ping_interval,
                        size=self.params.ping_psize,
                    )

                    ping_evaluators = self.generate_ping_evaluators(
                        pconf, endpoints)
                    pconf.register_evaluators(ping_evaluators)

                    ping_conf_list.append(pconf)

                    if self.params.ping_bidirect:
                        ping_conf_list.append(self._create_reverse_ping(pconf))

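                    # unless ping_parallel is set, only the first matching
                    # address pair is tested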
                    if not self.params.ping_parallel:
                        break

                yield ping_conf_list

    def generate_ping_endpoints(self, config):
        """Generator for ping endpoints

        To be overridden by a derived class.

        :return: list of endpoint-pair objects, each exposing an ``endpoints``
            device pair and a ``reachable`` flag, as consumed by
            :any:`generate_ping_configurations`
        :rtype: List
        """
        return []

    def generate_ping_evaluators(self, pconf, endpoints):
        return [RatePingEvaluator(min_rate=50)]

    def generate_perf_configurations(self, config):
        """Base perf test configuration generator

        The generator loops over all flow combinations to measure performance
        for (generated by the :any:`generate_flow_combinations` method). In
        addition, during each flow combination measurement we add a CPU
        utilization measurement to run in the background.

        Finally, for each generated perf test configuration we register
        measurement evaluators based on the :any:`cpu_perf_evaluators` and
        :any:`net_perf_evaluators` properties.

        :return: list of Perf test configurations
        :rtype: List[:any:`PerfRecipeConf`]
        """
        for flows in self.generate_flow_combinations(config):
            perf_recipe_conf = dict(
                recipe_config=config,
                flows=flows,
            )

            flows_measurement = self.params.net_perf_tool(
                flows, perf_recipe_conf)

            cpu_measurement_hosts = set()
            for flow in flows:
                cpu_measurement_hosts.add(flow.generator)
                cpu_measurement_hosts.add(flow.receiver)

            cpu_measurement = self.params.cpu_perf_tool(
                cpu_measurement_hosts,
                perf_recipe_conf,
            )

            perf_conf = PerfRecipeConf(
                measurements=[cpu_measurement, flows_measurement],
                iterations=self.params.perf_iterations,
            )

            perf_conf.register_evaluators(cpu_measurement,
                                          self.cpu_perf_evaluators)
            perf_conf.register_evaluators(flows_measurement,
                                          self.net_perf_evaluators)

            yield perf_conf

    def generate_flow_combinations(self, config):
        """Base flow combination generator

        The generator loops over all endpoint pairs to test performance between
        (generated by the :any:`generate_perf_endpoints` method) then over all
        the selected :any:`ip_versions` and uses the first IP address fitting
        these criteria. Then the generator loops over the performance tests
        selected via :any:`perf_tests`, and finally over the message sizes
        from :any:`perf_msg_sizes`.

        :return: list of Flow combinations to measure in parallel
        :rtype: List[:any:`PerfFlow`]
        """
        for client_nic, server_nic in self.generate_perf_endpoints(config):
            for ipv in self.params.ip_versions:
                ip_filter = {}
                if ipv == "ipv4":
                    ip_filter.update(family=AF_INET)
                elif ipv == "ipv6":
                    ip_filter.update(family=AF_INET6)
                    ip_filter.update(is_link_local=False)

                client_bind = client_nic.ips_filter(**ip_filter)[0]
                server_bind = server_nic.ips_filter(**ip_filter)[0]

                for perf_test in self.params.perf_tests:
                    for size in self.params.perf_msg_sizes:
                        yield [
                            self._create_perf_flow(
                                perf_test,
                                client_nic,
                                client_bind,
                                server_nic,
                                server_bind,
                                size,
                            )
                        ]

    def _create_perf_flow(self, perf_test, client_nic, client_bind, server_nic,
                          server_bind, msg_size) -> PerfFlow:
        """
        Wrapper to create a PerfFlow. Mixins that want to change this behavior (for example, to reverse the direction)
        can override this method as an alternative to overriding :any:`generate_flow_combinations`
        """
        cpupin = self.params.perf_tool_cpu if "perf_tool_cpu" in self.params else None
        return PerfFlow(
            type=perf_test,
            generator=client_nic.netns,
            generator_bind=client_bind,
            generator_nic=client_nic,
            receiver=server_nic.netns,
            receiver_bind=server_bind,
            receiver_nic=server_nic,
            msg_size=msg_size,
            duration=self.params.perf_duration,
            parallel_streams=self.params.perf_parallel_streams,
            cpupin=cpupin,
        )

    def generate_perf_endpoints(self, config):
        """Generator for perf endpoints

        To be overridden by a derived class.

        :return: list of device pairs
        :rtype: List[Tuple[:any:`Device`, :any:`Device`]]
        """
        return []

    @property
    def cpu_perf_evaluators(self):
        """CPU measurement evaluators

        To be overridden by a derived class. Returns the list of evaluators to
        use for CPU utilization measurement evaluation.

        :return: a list of cpu evaluator objects
        :rtype: List[BaseEvaluator]
        """
        return []

    @property
    def net_perf_evaluators(self):
        """Network flow measurement evaluators

        To be overridden by a derived class. Returns the list of evaluators to
        use for Network flow measurement evaluation.

        :return: a list of flow evaluator objects
        :rtype: List[BaseEvaluator]
        """
        return [NonzeroFlowEvaluator()]

    def wait_tentative_ips(self, devices):
        def condition():
            return all(
                [not ip.is_tentative for dev in devices for ip in dev.ips])

        self.ctl.wait_for_condition(condition, timeout=5)

    def _create_reverse_ping(self, pconf):
        return PingConf(
            client=pconf.destination,
            client_bind=pconf.destination_address,
            destination=pconf.client,
            destination_address=pconf.client_bind,
            count=pconf.ping_count,
            interval=pconf.ping_interval,
            size=pconf.ping_psize,
        )
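
The derived-class pattern that the docstrings above describe is what Example #1
implements in full; a condensed sketch (the class name and label are
hypothetical):

    class MinimalEnrtScenario(BaseEnrtRecipe):
        host1 = HostReq()
        host1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

        host2 = HostReq()
        host2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))

        def test_wide_configuration(self):
            config = super().test_wide_configuration()
            # ... address and device setup would go here, as in Example #1
            return config

        def generate_perf_endpoints(self, config):
            return [(self.matched.host1.eth0, self.matched.host2.eth0)]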
Example #4
class NeperMeasurementGenerator(BaseMeasurementGenerator):

    perf_tests = Param(default=("tcp_rr", "tcp_crr", "udp_rr"))
    perf_duration = IntParam(default=60)
    perf_iterations = IntParam(default=5)
    perf_tool_cpu = IntParam(mandatory=False)
    perf_parallel_streams = IntParam(default=1)
    perf_msg_sizes = ListParam(default=[123])

    net_perf_tool = Param(default=NeperFlowMeasurement)

    def generate_perf_measurements_combinations(self, config):
        combinations = super().generate_perf_measurements_combinations(config)
        for flow_combination in self.generate_flow_combinations(config):
            combinations.append([self.params.net_perf_tool(flow_combination)])
        return combinations

    def generate_flow_combinations(self, config):
        """Base flow combination generator

        The generator loops over all endpoint pairs to test performance between
        (generated by the :any:`generate_perf_endpoints` method) then over all
        the selected :any:`ip_versions` and uses the first IP address fitting
        these criteria. Then the generator loops over the performance tests
        selected via :any:`perf_tests`, and finally over the message sizes
        from :any:`perf_msg_sizes`.

        :return: list of Flow combinations to measure in parallel
        :rtype: List[:any:`PerfFlow`]
        """
        for client_nic, server_nic in self.generate_perf_endpoints(config):
            for ipv in self.params.ip_versions:
                ip_filter = {}
                if ipv == "ipv4":
                    ip_filter.update(family=AF_INET)
                elif ipv == "ipv6":
                    ip_filter.update(family=AF_INET6)
                    ip_filter.update(is_link_local=False)

                client_bind = client_nic.ips_filter(**ip_filter)[0]
                server_bind = server_nic.ips_filter(**ip_filter)[0]

                for perf_test in self.params.perf_tests:
                    for size in self.params.perf_msg_sizes:
                        yield [
                            self._create_perf_flow(
                                perf_test,
                                client_nic,
                                client_bind,
                                server_nic,
                                server_bind,
                                size,
                            )
                        ]

    def generate_perf_endpoints(self, config):
        """Generator for perf endpoints

        To be overridden by a derived class.

        :return: list of device pairs
        :rtype: List[Tuple[:any:`Device`, :any:`Device`]]
        """
        return []

    def _create_perf_flow(
        self,
        perf_test,
        client_nic,
        client_bind,
        server_nic,
        server_bind,
        msg_size,
    ) -> PerfFlow:
        """
        Wrapper to create a PerfFlow. Mixins that want to change this behavior (for example, to reverse the direction)
        can override this method as an alternative to overriding :any:`generate_flow_combinations`
        """
        return PerfFlow(
            type=perf_test,
            generator=client_nic.netns,
            generator_bind=client_bind,
            generator_nic=client_nic,
            receiver=server_nic.netns,
            receiver_bind=server_bind,
            receiver_nic=server_nic,
            msg_size=msg_size,
            duration=self.params.perf_duration,
            parallel_streams=self.params.perf_parallel_streams,
            cpupin=(self.params.perf_tool_cpu
                    if "perf_tool_cpu" in self.params else None),
        )
class IperfMeasurementGenerator(BaseMeasurementGenerator):
    """
    :param perf_tests:
        Parameter used by the :any:`generate_flow_combinations` generator.
        Tells the generator what types of network flow measurements to generate
        perf test configurations for.
    :type perf_tests: Tuple[str] (default ("tcp_stream", "udp_stream",
        "sctp_stream"))

    :param perf_tool_cpu:
        Parameter used by the :any:`generate_flow_combinations` generator. To
        indicate that the flow measurement should be pinned to a specific CPU
        core.
    :type perf_tool_cpu: :any:`IntParam` (optional parameter)

    :param perf_duration:
        Parameter used by the :any:`generate_perf_configurations` generator. To
        specify the duration of the performance measurements, in seconds.
    :type perf_duration: :any:`IntParam` (default 60)

    :param perf_iterations:
        Parameter used by the :any:`generate_perf_configurations` generator. To
        specify how many times should each performance measurement be repeated
        to generate cumulative results which can be statistically analyzed.
    :type perf_iterations: :any:`IntParam` (default 5)

    :param perf_parallel_streams:
        Parameter used by the :any:`generate_flow_combinations` generator. To
        specify how many parallel streams of the same network flow should be
        measured at the same time.
    :type perf_parallel_streams: :any:`IntParam` (default 1)

    :param perf_parallel_processes:
        Parameter used by the :any:`generate_flow_combinations` generator. To
        specify how many parallel net_perf_tool processes of the same network flow
        should be measured at the same time.
    :type perf_parallel_processes: :any:`IntParam` (default 1)

    :param perf_msg_sizes:
        Parameter used by the :any:`generate_flow_combinations` generator. To
        specify the different message sizes (in bytes) that should be tested
        for the network flow - each message size resulting in a separate
        performance measurement.
    :type perf_msg_sizes: List[Int] (default [123])
    """

    # common perf test params
    perf_tests = Param(default=("tcp_stream", "udp_stream", "sctp_stream"))
    perf_tool_cpu = ListParam(mandatory=False)
    perf_tool_cpu_policy = StrParam(mandatory=False)
    perf_duration = IntParam(default=60)
    perf_iterations = IntParam(default=5)
    perf_parallel_streams = IntParam(default=1)
    perf_parallel_processes = IntParam(default=1)
    perf_msg_sizes = ListParam(default=[123])

    net_perf_tool = Param(default=IperfFlowMeasurement)

    def generate_perf_measurements_combinations(self, config):
        combinations = super().generate_perf_measurements_combinations(config)
        for flow_combination in self.generate_flow_combinations(config):
            combinations.append([self.params.net_perf_tool(flow_combination)])
        return combinations

    def generate_flow_combinations(self, config):
        """Base flow combination generator

        The generator loops over all endpoint pairs to test performance between
        (generated by the :any:`generate_perf_endpoints` method) then over all
        the selected :any:`ip_versions` and uses the first IP address fitting
        these criteria. Then the generator loops over the performance tests
        selected via :any:`perf_tests`, and finally over the message sizes
        from :any:`perf_msg_sizes`.

        :return: list of Flow combinations to measure in parallel
        :rtype: List[:any:`PerfFlow`]
        """
        for client_nic, server_nic in self.generate_perf_endpoints(config):
            for ipv in self.params.ip_versions:
                ip_filter = {}
                if ipv == "ipv4":
                    ip_filter.update(family=AF_INET)
                elif ipv == "ipv6":
                    ip_filter.update(family=AF_INET6)
                    ip_filter.update(is_link_local=False)

                client_bind = client_nic.ips_filter(**ip_filter)[0]
                server_bind = server_nic.ips_filter(**ip_filter)[0]

                for perf_test in self.params.perf_tests:
                    for size in self.params.perf_msg_sizes:
                        yield self._create_perf_flows(
                            perf_test,
                            client_nic,
                            client_bind,
                            server_nic,
                            server_bind,
                            size,
                        )

    def generate_perf_endpoints(self, config):
        """Generator for perf endpoints

        To be overridden by a derived class.

        :return: list of device pairs
        :rtype: List[Tuple[:any:`Device`, :any:`Device`]]
        """
        return []

    def _create_perf_flows(
        self,
        perf_test,
        client_nic,
        client_bind,
        server_nic,
        server_bind,
        msg_size,
    ) -> List[PerfFlow]:
        flows = []
        port_offset = 12000
        for i in range(self.params.perf_parallel_processes):
            flows.append(
                self._create_perf_flow(
                    perf_test,
                    client_nic,
                    client_bind,
                    server_nic,
                    server_bind,
                    port_offset + i,
                    msg_size,
                    self._cpupin_based_on_policy(i),
                )
            )

        return flows

    def _cpupin_based_on_policy(self, process_no=None):
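        # sketch: with perf_tool_cpu=[2, 4] and perf_tool_cpu_policy
        # "round-robin" (hypothetical values), process 0 is pinned to [2],
        # process 1 to [4], process 2 to [2], and so on; with policy "all"
        # every process gets [2, 4]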
        if process_no is None:
            return None

        if "perf_tool_cpu" not in self.params:
            return None
        cpus = self.params.perf_tool_cpu

        if "perf_tool_cpu_policy" not in self.params:
            return cpus
        policy = self.params.perf_tool_cpu_policy

        if policy == 'round-robin':
            return [cpus[process_no % len(cpus)]]
        elif policy == 'all':
            return cpus
        else:
            raise Exception(f'Unknown perf_tool_cpu_policy {policy}')

    def _create_perf_flow(
        self,
        perf_test,
        client_nic,
        client_bind,
        server_nic,
        server_bind,
        server_port,
        msg_size,
        cpupin,
    ) -> PerfFlow:
        """
        Wrapper to create a PerfFlow. Mixins that want to change this behavior (for example, to reverse the direction)
        can override this method as an alternative to overriding :any:`generate_flow_combinations`
        """
        return PerfFlow(
            type=perf_test,
            generator=client_nic.netns,
            generator_bind=client_bind,
            generator_nic=client_nic,
            receiver=server_nic.netns,
            receiver_bind=server_bind,
            receiver_nic=server_nic,
            receiver_port=server_port,
            msg_size=msg_size,
            duration=self.params.perf_duration,
            parallel_streams=self.params.perf_parallel_streams,
            cpupin=cpupin,
        )
Example #6
class PacketAssert(BaseTestModule):
    interface = DeviceParam(mandatory=True)
    p_filter = StrParam(default="")
    grep_for = ListParam(default=[])
    promiscuous = BoolParam(default=False)
    _grep_exprs = []
    _p_recv = 0

    def _prepare_grep_exprs(self):
        for expr in self.params.grep_for:
            if expr is not None:
                self._grep_exprs.append(expr)

    def _compose_cmd(self):
        cmd = "tcpdump"
        if not self.params.promiscuous:
            cmd += " -p"
        iface = self.params.interface.name
        filt = self.params.p_filter
        cmd += ' -nn -i %s "%s"' % (iface, filt)
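        # e.g. with interface "eth0" and p_filter "icmp" (hypothetical values),
        # this composes: tcpdump -p -nn -i eth0 "icmp"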

        return cmd

    def _check_line(self, line):
        if line != "":
            for exp in self._grep_exprs:
                if not re.search(exp, line):
                    return
            self._p_recv += 1

    def run(self):
        self._res_data = {}
        if not is_installed("tcpdump"):
            self._res_data["msg"] = "tcpdump is not installed on this machine!"
            logging.error(self._res_data["msg"])
            return False

        self._prepare_grep_exprs()
        cmd = self._compose_cmd()
        logging.debug("compiled command: {}".format(cmd))

        packet_assert_process = subprocess.Popen(
            cmd,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
        )

        try:
            self.wait_for_interrupt()
        except:
            raise LnstError("Could not handle interrupt properly.")

        stdout, stderr = packet_assert_process.communicate()
        stdout = stdout.decode()
        stderr = stderr.decode()

        self._res_data["stderr"] = stderr
        # tcpdump always reports information to stderr, there may be actual
        # errors but also just generic debug information
        logging.debug(self._res_data["stderr"])

        for line in stdout.split("\n"):
            self._check_line(line)

        logging.debug("Capturing finised. Received %d packets." % self._p_recv)
        self._res_data["p_recv"] = self._p_recv

        return packet_assert_process.returncode == 0
Example #7
class PacketAssert(BaseTestModule):
    interface = DeviceParam(mandatory=True)
    p_filter = StrParam(default='')
    grep_for = ListParam(default=[])
    promiscuous = BoolParam(default=False)
    _grep_exprs = []
    _p_recv = 0

    def _prepare_grep_exprs(self):
        for expr in self.params.grep_for:
            if expr is not None:
                self._grep_exprs.append(expr)

    def _compose_cmd(self):
        cmd = "tcpdump"
        if not self.params.promiscuous:
            cmd += " -p"
        iface = self.params.interface.name
        filt = self.params.p_filter
        cmd += " -nn -i %s \"%s\"" % (iface, filt)

        return cmd

    def _check_line(self, line):
        if line != "":
            for exp in self._grep_exprs:
                if not re.search(exp, line):
                    return
            self._p_recv += 1

    def _is_real_err(self, err):
        ignore_exprs = [
            r"tcpdump: verbose output suppressed, use -v or -vv for full protocol decode",
            r"listening on %s, link-type .* \(.*\), capture size [0-9]* bytes"
            % self.params.interface.name,
            r"\d+ packets captured",
            r"\d+ packets received by filter",
            r"\d+ packets dropped by kernel",
        ]

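        # any stderr line not matching one of the benign tcpdump status
        # patterns above is treated as a real error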
        for line in err.split('\n'):
            if not line:
                continue
            match = False
            for expr in ignore_exprs:
                if re.search(expr, line):
                    match = True
                    break
            if not match:
                return True
        return False

    def run(self):
        self._res_data = {}
        if not is_installed("tcpdump"):
            self._res_data["msg"] = "tcpdump is not installed on this machine!"
            logging.error(self._res_data["msg"])
            return False

        self._prepare_grep_exprs()
        cmd = self._compose_cmd()
        logging.debug("compiled command: {}".format(cmd))

        packet_assert_process = subprocess.Popen(
            cmd,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
        )

        try:
            self.wait_for_interrupt()
        except:
            raise LnstError("Could not handle interrupt properly.")

        with packet_assert_process.stdout, packet_assert_process.stderr:
            stderr = packet_assert_process.stderr.read().decode()
            stdout = packet_assert_process.stdout.read().decode()

        self._res_data["stderr"] = stderr

        if self._is_real_err(stderr):
            self._res_data["msg"] = "errors reported by tcpdump"
            logging.error(self._res_data["msg"])
            logging.error(self._res_data["stderr"])
            return False

        for line in stdout.split("\n"):
            self._check_line(line)

        logging.debug("Capturing finised. Received %d packets." % self._p_recv)
        self._res_data["p_recv"] = self._p_recv

        return True
Example #8
class DevInterruptHWConfigMixin(BaseHWConfigMixin):
    """
    This class is an extension to the :any:`BaseEnrtRecipe` class that enables
    the CPU affinity (CPU pinning) of the test device IRQs. The test devices
    are defined by :attr:`dev_interrupt_hw_config_dev_list` property.

    .. note::
        This mixin also stops the irqbalance service.

    :param dev_intr_cpu:
        (optional test parameter) CPU ids to which the device IRQs should be pinned
    """

    dev_intr_cpu = ListParam(mandatory=False)

    @property
    def dev_interrupt_hw_config_dev_list(self):
        """
        The value of this property is a list of devices for which the IRQ CPU
        affinity should be configured. It has to be defined by a derived class.
        """
        return []

    def hw_config(self, config):
        super().hw_config(config)

        hw_config = config.hw_config

        if "dev_intr_cpu" in self.params:
            intr_cfg = hw_config["dev_intr_cpu_configuration"] = {}
            intr_cfg["irq_devs"] = {}
            intr_cfg["irqbalance_hosts"] = []

            hosts = []
            for dev in self.dev_interrupt_hw_config_dev_list:
                if dev.host not in hosts:
                    hosts.append(dev.host)
            for host in hosts:
                host.run("service irqbalance stop")
                intr_cfg["irqbalance_hosts"].append(host)

            for dev in self.dev_interrupt_hw_config_dev_list:
                # TODO better service handling through HostAPI
                self._pin_dev_interrupts(dev, self.params.dev_intr_cpu)
                intr_cfg["irq_devs"][dev] = self.params.dev_intr_cpu

    def hw_deconfig(self, config):
        intr_config = config.hw_config.get("dev_intr_cpu_configuration", {})
        for host in intr_config.get("irqbalance_hosts", []):
            host.run("service irqbalance start")

        super().hw_deconfig(config)

    def describe_hw_config(self, config):
        desc = super().describe_hw_config(config)

        hw_config = config.hw_config

        intr_cfg = hw_config.get("dev_intr_cpu_configuration", None)
        if intr_cfg:
            desc += [
                "{} irqbalance stopped".format(host.hostid)
                for host in intr_cfg["irqbalance_hosts"]
            ]
            desc += [
                "{}.{} irqs bound to cpu {}".format(
                    dev.host.hostid, dev._id, cpu
                )
                for dev, cpu in intr_cfg["irq_devs"].items()
            ]
        else:
            desc.append("Device irq configuration skipped.")
        return desc

    def _pin_dev_interrupts(self, dev, cpus):
        netns = dev.netns
        self._check_cpu_validity(netns, cpus)

        intrs = self._get_dev_interrupts(dev)

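        # distribute the device IRQs over the given CPUs round-robin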
        for i, intr in enumerate(intrs):
            try:
                cpu = cpus[i % len(cpus)]
                netns.run(
                    "echo -n {} > /proc/irq/{}/smp_affinity_list".format(
                        cpu, intr
                    )
                )
            except ValueError:
                pass

    def _check_cpu_validity(self, host, cpus):
        cpu_info = host.run("lscpu", job_level=ResultLevel.DEBUG).stdout
        regex = "CPU\(s\): *([0-9]*)"
        num_cpus = int(re.search(regex, cpu_info).groups()[0])
        for cpu in cpus:
            if cpu < 0 or cpu > num_cpus - 1:
                raise RecipeError(
                    "Invalid CPU value given: %d. Accepted value %s."
                    % (
                        cpu,
                        "is: 0" if num_cpus == 1 else "are: 0..%d" % (num_cpus - 1),
                    )
                )

    def _get_dev_interrupts(self, dev):
        if "up" not in dev.state:
            # device needs to be UP when grepping /proc/interrupts
            dev.up()
            set_down = True
        else:
            set_down = False

        dev_id_regex = r"({})|({})".format(dev.name, dev.bus_info)
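        # match /proc/interrupts lines by device name or bus info, then cut
        # out the leading IRQ number from the first column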
        res = dev.netns.run(
            "grep -P \"{}\" /proc/interrupts | cut -f1 -d: | sed 's/ //'".format(
                dev_id_regex
            ),
            job_level=ResultLevel.DEBUG,
        )

        if set_down:
            # set device back down if we set it up
            dev.down()

        return [int(intr.strip()) for intr in res.stdout.strip().split('\n')]
Example #9
class BaseEnrtRecipe(BaseSubConfigMixin, PingTestAndEvaluate, PerfRecipe):
    #common requirements parameters
    driver = StrParam(default="ixgbe")

    #common test parameters
    ip_versions = Param(default=("ipv4", "ipv6"))

    #common ping test params
    ping_parallel = BoolParam(default=False)
    ping_bidirect = BoolParam(default=False)
    ping_count = IntParam(default=100)
    ping_interval = FloatParam(default=0.2)
    ping_psize = IntParam(default=None)

    #common perf test params
    perf_tests = Param(default=("tcp_stream", "udp_stream", "sctp_stream"))
    perf_tool_cpu = IntParam(mandatory=False)
    perf_duration = IntParam(default=60)
    perf_iterations = IntParam(default=5)
    perf_parallel_streams = IntParam(default=1)
    perf_msg_sizes = ListParam(default=[123])
    perf_reverse = BoolParam(default=False)

    net_perf_tool = Param(default=IperfFlowMeasurement)
    cpu_perf_tool = Param(default=StatCPUMeasurement)

    def test(self):
        with self._test_wide_context() as main_config:
            for sub_config in self.generate_sub_configurations(main_config):
                with self._sub_context(sub_config) as recipe_config:
                    self.do_tests(recipe_config)

    @contextmanager
    def _test_wide_context(self):
        config = self.test_wide_configuration()
        self.describe_test_wide_configuration(config)
        try:
            yield config
        finally:
            self.test_wide_deconfiguration(config)

    def test_wide_configuration(self):
        return EnrtConfiguration()

    def test_wide_deconfiguration(self, config):
        #TODO check if anything is still applied and throw exception?
        return

    def describe_test_wide_configuration(self, config):
        description = self.generate_test_wide_description(config)
        self.add_result(
            True, "Summary of used Recipe parameters:\n{}".format(
                pprint.pformat(self.params._to_dict())))
        self.add_result(True, "\n".join(description))

    def generate_test_wide_description(self, config):
        return [
            "Testwide configuration for recipe {} description:".format(
                self.__class__.__name__)
        ]

    @contextmanager
    def _sub_context(self, config):
        self.apply_sub_configuration(config)
        self.describe_sub_configuration(config)
        try:
            yield config
        finally:
            self.remove_sub_configuration(config)

    def describe_sub_configuration(self, config):
        description = self.generate_sub_configuration_description(config)
        self.add_result(True, "\n".join(description))

    def generate_sub_configuration_description(self, config):
        return ["Sub configuration description:"]

    def do_tests(self, recipe_config):
        self.do_ping_tests(recipe_config)
        self.do_perf_tests(recipe_config)

    def do_ping_tests(self, recipe_config):
        for ping_config in self.generate_ping_configurations(recipe_config):
            result = self.ping_test(ping_config)
            self.ping_evaluate_and_report(ping_config, result)

    def do_perf_tests(self, recipe_config):
        for perf_config in self.generate_perf_configurations(recipe_config):
            result = self.perf_test(perf_config)
            self.perf_report_and_evaluate(result)

    def generate_ping_configurations(self, config):
        for endpoint1, endpoint2 in self.generate_ping_endpoints(config):
            for ipv in self.params.ip_versions:
                ip_filter = {}
                if ipv == "ipv4":
                    ip_filter.update(family=AF_INET)
                elif ipv == "ipv6":
                    ip_filter.update(family=AF_INET6)
                    ip_filter.update(is_link_local=False)

                endpoint1_ips = endpoint1.ips_filter(**ip_filter)
                endpoint2_ips = endpoint2.ips_filter(**ip_filter)

                if len(endpoint1_ips) != len(endpoint2_ips):
                    raise LnstError(
                        "Source/destination ip lists are of different size.")

                ping_conf_list = []
                for src_addr, dst_addr in zip(endpoint1_ips, endpoint2_ips):
                    pconf = PingConf(
                        client=endpoint1.netns,
                        client_bind=src_addr,
                        destination=endpoint2.netns,
                        destination_address=dst_addr,
                        count=self.params.ping_count,
                        interval=self.params.ping_interval,
                        size=self.params.ping_psize,
                    )

                    ping_conf_list.append(pconf)

                    if self.params.ping_bidirect:
                        ping_conf_list.append(self._create_reverse_ping(pconf))

                    if not self.params.ping_parallel:
                        break

                yield ping_conf_list

    def generate_ping_endpoints(self, config):
        return []

    def generate_perf_configurations(self, config):
        for flows in self.generate_flow_combinations(config):
            perf_recipe_conf = dict(
                recipe_config=config,
                flows=flows,
            )

            flows_measurement = self.params.net_perf_tool(
                flows, perf_recipe_conf)

            cpu_measurement_hosts = set()
            for flow in flows:
                cpu_measurement_hosts.add(flow.generator)
                cpu_measurement_hosts.add(flow.receiver)

            cpu_measurement = self.params.cpu_perf_tool(
                cpu_measurement_hosts,
                perf_recipe_conf,
            )

            perf_conf = PerfRecipeConf(
                measurements=[cpu_measurement, flows_measurement],
                iterations=self.params.perf_iterations,
            )

            perf_conf.register_evaluators(cpu_measurement,
                                          self.cpu_perf_evaluators)
            perf_conf.register_evaluators(flows_measurement,
                                          self.net_perf_evaluators)

            yield perf_conf

    def generate_flow_combinations(self, config):
        for client_nic, server_nic in self.generate_perf_endpoints(config):
            for ipv in self.params.ip_versions:
                if ipv == "ipv4":
                    family = AF_INET
                elif ipv == "ipv6":
                    family = AF_INET6

                client_bind = client_nic.ips_filter(family=family)[0]
                server_bind = server_nic.ips_filter(family=family)[0]

                for perf_test in self.params.perf_tests:
                    for size in self.params.perf_msg_sizes:
                        flow = PerfFlow(
                            type=perf_test,
                            generator=client_nic.netns,
                            generator_bind=client_bind,
                            receiver=server_nic.netns,
                            receiver_bind=server_bind,
                            msg_size=size,
                            duration=self.params.perf_duration,
                            parallel_streams=self.params.perf_parallel_streams,
                            cpupin=self.params.perf_tool_cpu
                            if "perf_tool_cpu" in self.params else None)
                        yield [flow]

                        if self.params.perf_reverse:
                            reverse_flow = self._create_reverse_flow(flow)
                            yield [reverse_flow]

    def generate_perf_endpoints(self, config):
        return []

    @property
    def cpu_perf_evaluators(self):
        return []

    @property
    def net_perf_evaluators(self):
        return [NonzeroFlowEvaluator()]

    def _create_reverse_flow(self, flow):
        rev_flow = PerfFlow(type=flow.type,
                            generator=flow.receiver,
                            generator_bind=flow.receiver_bind,
                            receiver=flow.generator,
                            receiver_bind=flow.generator_bind,
                            msg_size=flow.msg_size,
                            duration=flow.duration,
                            parallel_streams=flow.parallel_streams,
                            cpupin=flow.cpupin)
        return rev_flow

    def _create_reverse_ping(self, pconf):
        return PingConf(
            client=pconf.destination,
            client_bind=pconf.destination_address,
            destination=pconf.client,
            destination_address=pconf.client_bind,
            count=pconf.ping_count,
            interval=pconf.ping_interval,
            size=pconf.ping_psize,
        )