Example #1
class CoalescingHWConfigMixin(BaseHWConfigMixin):
    adaptive_rx_coalescing = BoolParam(mandatory=False)
    adaptive_tx_coalescing = BoolParam(mandatory=False)

    @property
    def coalescing_hw_config_dev_list(self):
        return []

    def hw_config(self, config):
        super().hw_config(config)

        self._configure_dev_attribute(
            config,
            self.coalescing_hw_config_dev_list,
            "adaptive_rx_coalescing",
            getattr(self.params, "adaptive_rx_coalescing", None),
        )
        self._configure_dev_attribute(
            config,
            self.coalescing_hw_config_dev_list,
            "adaptive_tx_coalescing",
            getattr(self.params, "adaptive_tx_coalescing", None),
        )

    def describe_hw_config(self, config):
        desc = super().describe_hw_config(config)
        desc.extend(
            self._describe_dev_attribute(config, "adaptive_rx_coalescing")
        )
        desc.extend(
            self._describe_dev_attribute(config, "adaptive_tx_coalescing")
        )
        return desc
Example #2
class CoalescingHWConfigMixin(BaseHWConfigMixin):
    """
    This class is an extension to the :any:`BaseEnrtRecipe` class that enables
    adaptive coalescing configuration on the devices defined by the
    :attr:`coalescing_hw_config_dev_list` property.

    :param adaptive_tx_coalescing:
        (optional test parameter) boolean to enable/disable TX adaptive
        coalescing on the devices
    :param adaptive_rx_coalescing:
        (optional test parameter) boolean to enable/disable RX adaptive
        coalescing on the devices
    """

    adaptive_rx_coalescing = BoolParam(mandatory=False)
    adaptive_tx_coalescing = BoolParam(mandatory=False)

    @property
    def coalescing_hw_config_dev_list(self):
        """
        The value of this property is a list of devices for which the
        adaptive coalescing features should be configured. It has to be
        defined by a derived class.
        """
        return []

    def hw_config(self, config):
        super().hw_config(config)

        for param in ["adaptive_rx_coalescing", "adaptive_tx_coalescing"]:
            param_value = getattr(self.params, param, None)
            if param_value is not None:
                self._configure_dev_attribute(
                    config, self.coalescing_hw_config_dev_list, param,
                    param_value)

    def hw_deconfig(self, config):
        for feature in ['adaptive_rx_coalescing', 'adaptive_tx_coalescing']:
            self._deconfigure_dev_attribute(
                config,
                self.coalescing_hw_config_dev_list,
                feature,
            )

        super().hw_deconfig(config)

    def describe_hw_config(self, config):
        desc = super().describe_hw_config(config)
        for param in ["adaptive_rx_coalescing", "adaptive_tx_coalescing"]:
            desc.extend(self._describe_dev_attribute(config, param))
        return desc
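
# A minimal usage sketch (not part of the original listing): a derived recipe
# only has to override the coalescing_hw_config_dev_list property to select
# the devices that the adaptive_rx_coalescing/adaptive_tx_coalescing
# parameters are applied to. The host and device names are assumptions for
# illustration.
class ExampleCoalescingRecipe(CoalescingHWConfigMixin, BaseEnrtRecipe):
    @property
    def coalescing_hw_config_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]

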
class PauseFramesHWConfigMixin(BaseHWConfigMixin):
    """
    This class is an extension to the :any:`BaseEnrtRecipe` class to configure
    the Ethernet pause frames on the devices defined by
    the :attr:`pause_frames_dev_list` property.
    """

    rx_pause_frames = BoolParam(mandatory=False)
    tx_pause_frames = BoolParam(mandatory=False)

    @property
    def pause_frames_dev_list(self):
        """
        The value of this property is a list of devices for which the pause
        frames should be configured. It has to be defined by a derived class.
        """
        return []

    def hw_config(self, config):
        super().hw_config(config)

        for param in ["rx_pause_frames", "tx_pause_frames"]:
            param_value = getattr(self.params, param, None)
            if param_value is not None:
                self._configure_dev_attribute(
                    config,
                    self.pause_frames_dev_list,
                    param,
                    param_value
                )

    def hw_deconfig(self, config):
        for param in ["rx_pause_frames", "tx_pause_frames"]:
            self._deconfigure_dev_attribute(
                config,
                self.pause_frames_dev_list,
                param
            )

        super().hw_deconfig(config)

    def describe_hw_config(self, config):
        desc = super().describe_hw_config(config)
        for param in ["rx_pause_frames", "tx_pause_frames"]:
            desc.extend(
                    self._describe_dev_attribute(config, param)
                    )

        return desc
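
# A minimal usage sketch (not part of the original listing): overriding the
# pause_frames_dev_list property lets the mixin apply the rx_pause_frames and
# tx_pause_frames parameters to the selected devices. The host and device
# names are assumptions for illustration.
class ExamplePauseFramesRecipe(PauseFramesHWConfigMixin, BaseEnrtRecipe):
    @property
    def pause_frames_dev_list(self):
        return [self.matched.host1.eth0, self.matched.host2.eth0]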
Example #4
class IperfServer(IperfBase):
    bind = IpParam()
    port = IntParam()
    cpu_bind = IntParam()
    opts = StrParam()
    oneoff = BoolParam(default=False)

    _role = "server"

    def _compose_cmd(self):
        bind = ""
        port = ""

        if "bind" in self.params:
            bind = "-B {}".format(self.params.bind)

        if "port" in self.params:
            port = "-p {}".format(self.params.port)

        if "cpu_bind" in self.params:
            cpu = "-A {:d}".format(self.params.cpu_bind)
        else:
            cpu = ""

        if "oneoff" in self.params and self.params.oneoff:
            oneoff = "-1"

        cmd = "iperf3 -s {bind} -J {port} {cpu} {oneoff} {opts}".format(
            bind=bind,
            port=port,
            cpu=cpu,
            oneoff=oneoff,
            opts=self.params.opts if "opts" in self.params else "")

        return cmd
Example #5
class DropCachesPerfTestMixin(BasePerfTestIterationTweakMixin):
    """
    This mixin class is an extension to the :any:`BaseEnrtRecipe` class that can
    be used to drop vm caches before running each iteration of the performance
    measurements.

    :param drop_caches:
        (optional test parameter) a boolean; if set to True, the memory caches
        are dropped, otherwise the mixin has no effect
    """

    drop_caches = BoolParam(default=False)

    def generate_perf_test_iteration_tweak_description(self, perf_config):
        description = super().generate_perf_test_iteration_tweak_description(
            perf_config)
        if self.params.drop_caches:
            for host in self.matched:
                description.append(
                    "dropped vm caches before iteration on host {}".format(
                        host.hostid))
        else:
            description.append("skipped dropping vm caches before iteration")
        return description

    def apply_perf_test_iteration_tweak(self, perf_config):
        super().apply_perf_test_iteration_tweak(perf_config)

        if self.params.drop_caches:
            for host in self.matched:
                host.run("echo 1 > /proc/sys/vm/drop_caches")

    def remove_perf_test_iteration_tweak(self, perf_config):
        super().remove_perf_test_iteration_tweak(perf_config)
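
# A minimal usage sketch (not part of the original listing): the mixin only
# needs to be mixed into a recipe; running that recipe with drop_caches=True
# makes every matched host run "echo 1 > /proc/sys/vm/drop_caches" before each
# perf iteration. The recipe name is an assumption for illustration.
class ExampleDropCachesRecipe(DropCachesPerfTestMixin, BaseEnrtRecipe):
    pass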
Example #6
class PerfReversibleFlowMixin(object):
    """ Mixin class for reversing the performance test flows

    This only really makes sense for recipes that have asymmetric endpoints.

    For example:

    SimpleNetworkRecipe is symmetrical since both endpoints are of the same type
    (both plain interfaces).

    TeamRecipe is asymmetrical because one endpoint is a team device and the
    other is a plain interface.

    So TeamRecipe could use this mixin to indicate that it can be reversed.

    This can be controlled by the perf_reverse parameter:

    :param perf_reverse:
        Parameter used by the :any:`generate_flow_combinations` generator. To
        specify that the flow of traffic between the endpoints should be reversed.
    :type perf_reverse: :any:`BoolParam` (default False)
    """
    perf_reverse = BoolParam(default=False)

    def _create_perf_flow(self, perf_test, client_nic, client_bind, server_nic,
                          server_bind, msg_size) -> PerfFlow:
        if self.params.perf_reverse:
            return super()._create_perf_flow(perf_test, server_nic,
                                             server_bind, client_nic,
                                             client_bind, msg_size)
        else:
            return super()._create_perf_flow(perf_test, client_nic,
                                             client_bind, server_nic,
                                             server_bind, msg_size)
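
# A minimal usage sketch (not part of the original listing): an asymmetric
# recipe opts into flow reversal by listing the mixin before the base recipe
# class, so its _create_perf_flow override runs first in the MRO; setting
# perf_reverse=True then swaps the client and server sides of every generated
# PerfFlow. The recipe name is an assumption for illustration.
class ExampleAsymmetricRecipe(PerfReversibleFlowMixin, BaseEnrtRecipe):
    pass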
Example #7
class DoubleTeamRecipe(CommonHWSubConfigMixin, OffloadSubConfigMixin,
    BaseEnrtRecipe):
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="tnet", driver=RecipeParam("driver"))
    host1.eth1 = DeviceReq(label="tnet", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="tnet", driver=RecipeParam("driver"))
    host2.eth1 = DeviceReq(label="tnet", driver=RecipeParam("driver"))

    offload_combinations = Param(default=(
        dict(gro="on", gso="on", tso="on", tx="on"),
        dict(gro="off", gso="on", tso="on", tx="on"),
        dict(gro="on", gso="off", tso="off", tx="on"),
        dict(gro="on", gso="on", tso="off", tx="off")))

    perf_reverse = BoolParam(default=True)
    runner_name = StrParam(mandatory=True)

    def test_wide_configuration(self):
        host1, host2 = self.matched.host1, self.matched.host2

        net_addr_1 = "192.168.10"
        net_addr6_1 = "fc00:0:0:1"
        for i, host in enumerate([host1, host2]):
            #The config argument needs to be used with a team device
            #normally (e.g. to specify the runner mode), but it is not used
            #here due to a bug in the TeamDevice module
            host.team0 = TeamDevice()
            for dev in [host.eth0, host.eth1]:
                dev.down()
                host.team0.slave_add(dev)
            host.team0.ip_add(ipaddress(net_addr_1 + "." + str(i+1) +
                "/24"))
            host.team0.ip_add(ipaddress(net_addr6_1 + "::" + str(i+1) +
                "/64"))
            for dev in [host.eth0, host.eth1, host.team0]:
                dev.up()

        configuration = super().test_wide_configuration()
        configuration.test_wide_devices = [host1.team0, host2.team0]

        self.wait_tentative_ips(configuration.test_wide_devices)

        return configuration

    def generate_test_wide_description(self, config):
        host1, host2 = self.matched.host1, self.matched.host2
        desc = super().generate_test_wide_description(config)
        desc += [
            "\n".join([
                "Configured {}.{}.ips = {}".format(
                    dev.host.hostid, dev.name, dev.ips
                )
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.slaves = {}".format(
                    dev.host.hostid, dev.name,
                    ['.'.join([dev.host.hostid, slave.name])
                    for slave in dev.slaves]
                )
                for dev in config.test_wide_devices
            ]),
            "\n".join([
                "Configured {}.{}.runner_name = {}".format(
                    dev.host.hostid, dev.name, dev.config
                )
                for dev in config.test_wide_devices
            ])
        ]
        return desc

    def test_wide_deconfiguration(self, config):
        del config.test_wide_devices

        super().test_wide_deconfiguration(config)

    def generate_ping_endpoints(self, config):
        return [(self.matched.host1.team0, self.matched.host2.team0),
            (self.matched.host2.team0, self.matched.host1.team0)]

    def generate_perf_endpoints(self, config):
        return [(self.matched.host1.team0, self.matched.host2.team0),
            (self.matched.host2.team0, self.matched.host1.team0)]

    def wait_tentative_ips(self, devices):
        def condition():
            return all(
                [not ip.is_tentative for dev in devices for ip in dev.ips]
            )

        self.ctl.wait_for_condition(condition, timeout=5)

    @property
    def offload_nics(self):
        return [self.matched.host1.team0, self.matched.host2.team0]

    @property
    def mtu_hw_config_dev_list(self):
        return [self.matched.host1.team0, self.matched.host2.team0]

    @property
    def coalescing_hw_config_dev_list(self):
        host1, host2 = self.matched.host1, self.matched.host2
        return [host1.eth0, host1.eth1, host2.eth0, host2.eth1]

    @property
    def dev_interrupt_hw_config_dev_list(self):
        host1, host2 = self.matched.host1, self.matched.host2
        return [host1.eth0, host1.eth1, host2.eth0, host2.eth1]

    @property
    def parallel_stream_qdisc_hw_config_dev_list(self):
        host1, host2 = self.matched.host1, self.matched.host2
        return [host1.eth0, host1.eth1, host2.eth0, host2.eth1]
Example #8
class BaseEnrtRecipe(SctpFirewallPerfTestMixin, BaseSubConfigMixin,
                     PingTestAndEvaluate, PerfRecipe):
    """Base Recipe class for the ENRT recipe package

    This class defines the shared *test* method that implements the common test
    procedure in a very generic way. This common test procedure involves a
    single main *test_wide* configuration that is different for every specific
    scenario. After the main configuration there is usually a loop over several
    minor *sub* configuration types that can take different values to slightly
    change the tested use cases.

    Finally, for each combination of a **test_wide** + **sub** configuration we
    run several ping connection tests and several performance measurement tests.

    **test_wide** and **sub** configurations are implemented with **context
    manager** methods which ensure that deconfiguration is called even if
    exceptions are raised (for example because of a bug in the recipe).

    Both **test_wide** and **sub** configurations are to be implemented in
    different classes, the BaseEnrtRecipe class only defines the common API and
    the base versions of the relevant methods.

    Test wide configuration is implemented via the following methods:

    * :any:`test_wide_configuration`
    * :any:`test_wide_deconfiguration`
    * :any:`generate_test_wide_description`

    Sub configurations are **mixed into** classes defining the specific
    scenario that is being tested. Various sub configurations are implemented as
    individual Python **Mixin** classes in the
    :any:`ConfigMixins<config_mixins>` package. These make use of Python's
    collaborative inheritance by calling the `super` function in a specific way.
    The "machinery" for that is defined in the :any:`BaseSubConfigMixin` class.
    It is then used in this class from the `test` method loop.

    :param driver:
        The driver parameter is used to modify the hw network requirements,
        specifically to request Devices using the specified driver. This is
        common enough in the Enrt recipes that it can be part of the Base class.

    :type driver: :any:`StrParam` (default "ixgbe")

    :param ip_versions:
        Parameter that determines which IP protocol versions will be tested.
    :type ip_versions: Tuple[Str] (default ("ipv4", "ipv6"))

    :param ping_parallel:
        Parameter used by the :any:`generate_ping_configurations` generator.
        Tells the generator method to create :any:`PingConf` objects that will
        be run in parallel.
    :type ping_parallel: :any:`BoolParam` (default False)

    :param ping_bidirect:
        Parameter used by the :any:`generate_ping_configurations` generator.
        Tells the generator method to create :any:`PingConf` objects for both
        directions between the ping endpoints.
    :type ping_bidirect: :any:`BoolParam` (default False)

    :param ping_count:
        Parameter used by the :any:`generate_ping_configurations` generator.
        Tells the generator how many pings should be sent for each ping test.
    :type ping_count: :any:`IntParam` (default 100)

    :param ping_interval:
        Parameter used by the :any:`generate_ping_configurations` generator.
        Tells the generator how fast the pings should be sent in each ping test.
    :type ping_interval: :any:`FloatParam` (default 0.2)

    :param ping_psize:
        Parameter used by the :any:`generate_ping_configurations` generator.
        Tells the generator how big the ping packets should be in each ping
        test.
    :type ping_psize: :any:`IntParam` (default 56)

    :param perf_tests:
        Parameter used by the :any:`generate_flow_combinations` generator.
        Tells the generator what types of network flow measurements to generate
        perf test configurations for.
    :type perf_tests: Tuple[str] (default ("tcp_stream", "udp_stream",
        "sctp_stream"))

    :param perf_tool_cpu:
        Parameter used by the :any:`generate_flow_combinations` generator. To
        indicate that the flow measurement should be pinned to a specific CPU
        core.
    :type perf_tool_cpu: :any:`IntParam` (optional parameter)

    :param perf_duration:
        Parameter used by the :any:`generate_perf_configurations` generator. To
        specify the duration of the performance measurements, in seconds.
    :type perf_duration: :any:`IntParam` (default 60)

    :param perf_iterations:
        Parameter used by the :any:`generate_perf_configurations` generator. To
        specify how many times each performance measurement should be repeated
        to generate cumulative results which can be statistically analyzed.
    :type perf_iterations: :any:`IntParam` (default 5)

    :param perf_parallel_streams:
        Parameter used by the :any:`generate_flow_combinations` generator. To
        specify how many parallel streams of the same network flow should be
        measured at the same time.
    :type perf_parallel_streams: :any:`IntParam` (default 1)

    :param perf_msg_sizes:
        Parameter used by the :any:`generate_flow_combinations` generator. To
        specify the different message sizes (in bytes) that should be tested
        for the generated network flow - each message size results in a
        separate performance measurement.
    :type perf_msg_sizes: List[Int] (default [123])

    :param net_perf_tool:
        Parameter used by the :any:`generate_perf_configurations` generator to
        create a PerfRecipeConf object.
        Specifies a network flow measurement class that accepts :any:`PerfFlow`
        objects and can be used to measure those specified flows.
    :type net_perf_tool: :any:`BaseFlowMeasurement` (default
        IperfFlowMeasurement)

    :param cpu_perf_tool:
        Parameter used by the :any:`generate_perf_configurations` generator to
        create a PerfRecipeConf object.
        Specifies a cpu measurement class that can be used to measure CPU
        utilization on specified hosts.
    :type cpu_perf_tool: :any:`BaseCPUMeasurement` (default StatCPUMeasurement)
    """

    driver = StrParam(default="ixgbe")

    #common test parameters
    ip_versions = Param(default=("ipv4", "ipv6"))

    #common ping test params
    ping_parallel = BoolParam(default=False)
    ping_bidirect = BoolParam(default=False)
    ping_count = IntParam(default=100)
    ping_interval = FloatParam(default=0.2)
    ping_psize = IntParam(default=56)

    #common perf test params
    perf_tests = Param(default=("tcp_stream", "udp_stream", "sctp_stream"))
    perf_tool_cpu = IntParam(mandatory=False)
    perf_duration = IntParam(default=60)
    perf_iterations = IntParam(default=5)
    perf_parallel_streams = IntParam(default=1)
    perf_msg_sizes = ListParam(default=[123])

    net_perf_tool = Param(default=IperfFlowMeasurement)
    cpu_perf_tool = Param(default=StatCPUMeasurement)

    def test(self):
        """Main test loop shared by all the Enrt recipes

        The test loop involves a single application of a **test_wide**
        configuration, then a loop over multiple **sub** configurations that
        involves:

        * creating the combined sub configuration of all available SubConfig
          Mixin classes via :any:`generate_sub_configurations`
        * applying the generated sub configuration via the :any:`_sub_context`
          context manager method
        * running tests
        * removing the current sub configuration via the :any:`_sub_context`
          context manager method
        """
        with self._test_wide_context() as main_config:
            for sub_config in self.generate_sub_configurations(main_config):
                with self._sub_context(sub_config) as recipe_config:
                    self.do_tests(recipe_config)

    @contextmanager
    def _test_wide_context(self):
        config = self.test_wide_configuration()
        self.describe_test_wide_configuration(config)
        try:
            yield config
        finally:
            self.test_wide_deconfiguration(config)

    def test_wide_configuration(self):
        """Creates an empty :any:`EnrtConfiguration` object

        This is again used in a collaborative inheritance design that may
        potentially be useful for Enrt recipes. Derived classes will each
        individually add their own values to the instance created here. This way
        the complete test wide configuration is tracked in a single object.

        :return: returns a config object that tracks the applied configuration
            that can be used during testing to inspect the current state and
            make test decisions based on it.
        :rtype: :any:`EnrtConfiguration`

        Example::

            class Derived:
                def test_wide_configuration():
                    config = super().test_wide_configuration()

                    # ... configure something
                    config.something = what_was_configured

                    return config
        """
        return EnrtConfiguration()

    def test_wide_deconfiguration(self, config):
        """Base deconfiguration method.

        In the base class this should maybe only check if there's any leftover
        configuration and warn about it. In derived classes this can be
        overridden to take care of deconfiguring what was configured in the
        respective test_wide_configuration method.

        Example::

            class Derived:
                def test_wide_deconfiguration(config):
                    # ... deconfigure something
                    del config.something #cleanup tracking

                    return super().test_wide_deconfiguration()
        """
        #TODO check if anything is still applied and throw exception?
        return

    def describe_test_wide_configuration(self, config):
        """Describes the current test wide configuration

        Creates a new result object that contains the description of the full
        test wide configuration applied by all the
        :any:`test_wide_configuration` methods in the class hierarchy.

        The description needs to be generated by the
        :any:`generate_test_wide_description` method. Additionally the
        description contains the state of all the parameters and their values
        passed to the recipe class instance during initialization.
        """
        description = self.generate_test_wide_description(config)
        self.add_result(
            True, "Summary of used Recipe parameters:\n{}".format(
                pprint.pformat(self.params._to_dict())))
        self.add_result(True, "\n".join(description))

    def generate_test_wide_description(self, config):
        """Generates the test wide configuration description

        Another method intended to be used with the collaborative version of the
        `super` method to cumulatively describe the full test wide configuration
        that was applied through multiple classes.

        The base class version of this method creates the initial list of
        strings containing just the header line. Each string added to this list
        will later be printed on its own line.

        :return: list of strings, each representing a single line
        :rtype: List[str]

        Example::

            class Derived:
                def generate_sub_configuration_description(config):
                    desc = super().generate_sub_configuration_description(config)
                    desc.append("Configured something: {}".format(config.something))
                    return desc
        """
        return [
            "Testwide configuration for recipe {} description:".format(
                self.__class__.__name__)
        ]

    @contextmanager
    def _sub_context(self, config):
        self.apply_sub_configuration(config)
        self.describe_sub_configuration(config)
        try:
            yield config
        finally:
            self.remove_sub_configuration(config)

    def describe_sub_configuration(self, config):
        description = self.generate_sub_configuration_description(config)
        self.add_result(True, "\n".join(description))

    def do_tests(self, recipe_config):
        """Entry point for actual tests

        The common scenario is to do ping and performance tests; however, the
        method can be overridden to add more tests if needed.
        """
        self.do_ping_tests(recipe_config)
        self.do_perf_tests(recipe_config)

    def do_ping_tests(self, recipe_config):
        """Ping testing loop

        Loops over all various ping configurations generated by the
        :any:`generate_ping_configurations` method, then uses the PingRecipe
        methods to execute, report and evaluate the results.
        """
        for ping_configs in self.generate_ping_configurations(recipe_config):
            result = self.ping_test(ping_configs)
            self.ping_report_and_evaluate(result)

    def describe_perf_test_tweak(self, perf_config):
        description = self.generate_perf_test_tweak_description(perf_config)
        self.add_result(True, "\n".join(description))

    def do_perf_tests(self, recipe_config):
        """Performance testing loop

        Loops over all various perf configurations generated by the
        :any:`generate_perf_configurations` method, then uses the PerfRecipe
        methods to execute, report and evaluate the results.
        """
        for perf_config in self.generate_perf_configurations(recipe_config):
            self.apply_perf_test_tweak(perf_config)
            self.describe_perf_test_tweak(perf_config)
            try:
                result = self.perf_test(perf_config)
                self.perf_report_and_evaluate(result)
            finally:
                self.remove_perf_test_tweak(perf_config)

    def generate_ping_configurations(self, config):
        """Base ping test configuration generator

        The generator loops over all endpoint pairs to test ping between
        (generated by the :any:`generate_ping_endpoints` method) then over all
        the selected :any:`ip_versions` and finally over all the IP addresses
        that fit those criteria.

        :return: list of Ping configurations to test in parallel
        :rtype: List[:any:`PingConf`]
        """
        for endpoints in self.generate_ping_endpoints(config):
            for ipv in self.params.ip_versions:
                if ipv == "ipv6" and not endpoints.reachable:
                    continue

                ip_filter = {}
                if ipv == "ipv4":
                    ip_filter.update(family=AF_INET)
                elif ipv == "ipv6":
                    ip_filter.update(family=AF_INET6)
                    ip_filter.update(is_link_local=False)

                endpoint1, endpoint2 = endpoints.endpoints
                endpoint1_ips = endpoint1.ips_filter(**ip_filter)
                endpoint2_ips = endpoint2.ips_filter(**ip_filter)

                if len(endpoint1_ips) != len(endpoint2_ips):
                    raise LnstError(
                        "Source/destination ip lists are of different size.")

                ping_conf_list = []
                for src_addr, dst_addr in zip(endpoint1_ips, endpoint2_ips):
                    pconf = PingConf(
                        client=endpoint1.netns,
                        client_bind=src_addr,
                        destination=endpoint2.netns,
                        destination_address=dst_addr,
                        count=self.params.ping_count,
                        interval=self.params.ping_interval,
                        size=self.params.ping_psize,
                    )

                    ping_evaluators = self.generate_ping_evaluators(
                        pconf, endpoints)
                    pconf.register_evaluators(ping_evaluators)

                    ping_conf_list.append(pconf)

                    if self.params.ping_bidirect:
                        ping_conf_list.append(self._create_reverse_ping(pconf))

                    if not self.params.ping_parallel:
                        break

                yield ping_conf_list

    def generate_ping_endpoints(self, config):
        """Generator for ping endpoints

        To be overridden by a derived class.

        :return: list of device pairs
        :rtype: List[Tuple[:any:`Device`, :any:`Device`]]
        """
        return []

    def generate_ping_evaluators(self, pconf, endpoints):
        return [RatePingEvaluator(min_rate=50)]

    def generate_perf_configurations(self, config):
        """Base perf test configuration generator

        The generator loops over all flow combinations to measure performance
        for (generated by the :any:`generate_flow_combinations` method). In
        addition to that, during each flow combination measurement we add a CPU
        utilization measurement to run in the background.

        Finally for each generated perf test configuration we register
        measurement evaluators based on the :any:`cpu_perf_evaluators` and
        :any:`net_perf_evaluators` properties.

        :return: list of Perf test configurations
        :rtype: List[:any:`PerfRecipeConf`]
        """
        for flows in self.generate_flow_combinations(config):
            perf_recipe_conf = dict(
                recipe_config=config,
                flows=flows,
            )

            flows_measurement = self.params.net_perf_tool(
                flows, perf_recipe_conf)

            cpu_measurement_hosts = set()
            for flow in flows:
                cpu_measurement_hosts.add(flow.generator)
                cpu_measurement_hosts.add(flow.receiver)

            cpu_measurement = self.params.cpu_perf_tool(
                cpu_measurement_hosts,
                perf_recipe_conf,
            )

            perf_conf = PerfRecipeConf(
                measurements=[cpu_measurement, flows_measurement],
                iterations=self.params.perf_iterations,
            )

            perf_conf.register_evaluators(cpu_measurement,
                                          self.cpu_perf_evaluators)
            perf_conf.register_evaluators(flows_measurement,
                                          self.net_perf_evaluators)

            yield perf_conf

    def generate_flow_combinations(self, config):
        """Base flow combination generator

        The generator loops over all endpoint pairs to test performance between
        (generated by the :any:`generate_perf_endpoints` method) then over all
        the selected :any:`ip_versions` and uses the first IP address fitting
        these criteria. Then the generator loops over the performance tests
        selected via :any:`perf_tests`, then over the message sizes from
        :any:`perf_msg_sizes`.

        :return: list of Flow combinations to measure in parallel
        :rtype: List[:any:`PerfFlow`]
        """
        for client_nic, server_nic in self.generate_perf_endpoints(config):
            for ipv in self.params.ip_versions:
                ip_filter = {}
                if ipv == "ipv4":
                    ip_filter.update(family=AF_INET)
                elif ipv == "ipv6":
                    ip_filter.update(family=AF_INET6)
                    ip_filter.update(is_link_local=False)

                client_bind = client_nic.ips_filter(**ip_filter)[0]
                server_bind = server_nic.ips_filter(**ip_filter)[0]

                for perf_test in self.params.perf_tests:
                    for size in self.params.perf_msg_sizes:
                        yield [
                            self._create_perf_flow(
                                perf_test,
                                client_nic,
                                client_bind,
                                server_nic,
                                server_bind,
                                size,
                            )
                        ]

    def _create_perf_flow(self, perf_test, client_nic, client_bind, server_nic,
                          server_bind, msg_size) -> PerfFlow:
        """
        Wrapper to create a PerfFlow. Mixins that want to change this behavior (for example, to reverse the direction)
        can override this method as an alternative to overriding :any:`generate_flow_combinations`
        """
        cpupin = self.params.perf_tool_cpu if "perf_tool_cpu" in self.params else None
        return PerfFlow(
            type=perf_test,
            generator=client_nic.netns,
            generator_bind=client_bind,
            generator_nic=client_nic,
            receiver=server_nic.netns,
            receiver_bind=server_bind,
            receiver_nic=server_nic,
            msg_size=msg_size,
            duration=self.params.perf_duration,
            parallel_streams=self.params.perf_parallel_streams,
            cpupin=cpupin,
        )

    def generate_perf_endpoints(self, config):
        """Generator for perf endpoints

        To be overridden by a derived class.

        :return: list of device pairs
        :rtype: List[Tuple[:any:`Device`, :any:`Device`]]
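
        Example (a minimal sketch; the host and device names are only
        illustrative)::

            class Derived:
                def generate_perf_endpoints(self, config):
                    return [(self.matched.host1.eth0, self.matched.host2.eth0)]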
        """
        return []

    @property
    def cpu_perf_evaluators(self):
        """CPU measurement evaluators

        To be overridden by a derived class. Returns the list of evaluators to
        use for CPU utilization measurement evaluation.

        :return: a list of cpu evaluator objects
        :rtype: List[BaseEvaluator]
        """
        return []

    @property
    def net_perf_evaluators(self):
        """Network flow measurement evaluators

        To be overridden by a derived class. Returns the list of evaluators to
        use for Network flow measurement evaluation.

        :return: a list of flow evaluator objects
        :rtype: List[BaseEvaluator]
        """
        return [NonzeroFlowEvaluator()]

    def wait_tentative_ips(self, devices):
        def condition():
            return all(
                [not ip.is_tentative for dev in devices for ip in dev.ips])

        self.ctl.wait_for_condition(condition, timeout=5)

    def _create_reverse_ping(self, pconf):
        return PingConf(
            client=pconf.destination,
            client_bind=pconf.destination_address,
            destination=pconf.client,
            destination_address=pconf.client_bind,
            count=pconf.ping_count,
            interval=pconf.ping_interval,
            size=pconf.ping_psize,
        )
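
# A minimal sketch of a derived Enrt recipe (not part of the original listing):
# a derived class typically adds hardware requirements and overrides the
# endpoint generators (plus test_wide_configuration to assign addresses, which
# is omitted here). The labels and device names are assumptions for
# illustration.
class ExampleEnrtRecipe(BaseEnrtRecipe):
    host1 = HostReq()
    host1.eth0 = DeviceReq(label="net", driver=RecipeParam("driver"))

    host2 = HostReq()
    host2.eth0 = DeviceReq(label="net", driver=RecipeParam("driver"))

    def generate_perf_endpoints(self, config):
        return [(self.matched.host1.eth0, self.matched.host2.eth0)]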
Example #9
class PacketAssert(BaseTestModule):
    interface = DeviceParam(mandatory=True)
    p_filter = StrParam(default="")
    grep_for = ListParam(default=[])
    promiscuous = BoolParam(default=False)
    _grep_exprs = []
    _p_recv = 0

    def _prepare_grep_exprs(self):
        for expr in self.params.grep_for:
            if expr is not None:
                self._grep_exprs.append(expr)

    def _compose_cmd(self):
        cmd = "tcpdump"
        if not self.params.promiscuous:
            cmd += " -p"
        iface = self.params.interface.name
        filt = self.params.p_filter
        cmd += ' -nn -i %s "%s"' % (iface, filt)

        return cmd

    def _check_line(self, line):
        if line != "":
            for exp in self._grep_exprs:
                if not re.search(exp, line):
                    return
            self._p_recv += 1

    def run(self):
        self._res_data = {}
        if not is_installed("tcpdump"):
            self._res_data["msg"] = "tcpdump is not installed on this machine!"
            logging.error(self._res_data["msg"])
            return False

        self._prepare_grep_exprs()
        cmd = self._compose_cmd()
        logging.debug("compiled command: {}".format(cmd))

        packet_assert_process = subprocess.Popen(
            cmd,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
        )

        try:
            self.wait_for_interrupt()
        except:
            raise LnstError("Could not handle interrupt properly.")

        stdout, stderr = packet_assert_process.communicate()
        stdout = stdout.decode()
        stderr = stderr.decode()

        self._res_data["stderr"] = stderr
        # tcpdump always reports information to stderr, there may be actual
        # errors but also just generic debug information
        logging.debug(self._res_data["stderr"])

        for line in stdout.split("\n"):
            self._check_line(line)

        logging.debug("Capturing finised. Received %d packets." % self._p_recv)
        self._res_data["p_recv"] = self._p_recv

        if packet_assert_process.returncode != 0:
            return False
        else:
            return True
Example #10
class PacketAssert(BaseTestModule):
    interface = DeviceParam(mandatory=True)
    p_filter = StrParam(default='')
    grep_for = ListParam(default=[])
    promiscuous = BoolParam(default=False)
    _grep_exprs = []
    _p_recv = 0

    def _prepare_grep_exprs(self):
        for expr in self.params.grep_for:
            if expr is not None:
                self._grep_exprs.append(expr)

    def _compose_cmd(self):
        cmd = "tcpdump"
        if not self.params.promiscuous:
            cmd += " -p"
        iface = self.params.interface.name
        filt = self.params.p_filter
        cmd += " -nn -i %s \"%s\"" % (iface, filt)

        return cmd

    def _check_line(self, line):
        if line != "":
            for exp in self._grep_exprs:
                if not re.search(exp, line):
                    return
            self._p_recv += 1

    def _is_real_err(self, err):

        ignore_exprs = [r"tcpdump: verbose output suppressed, use -v or -vv for full protocol decode",
                        r"listening on %s, link-type .* \(.*\), capture size [0-9]* bytes" %
                        self.params.interface.name, r"\d+ packets captured",
                        r"\d+ packets received by filter", r"\d+ packets dropped by kernel"]

        for line in err.split('\n'):
            if not line:
                continue
            match = False
            for expr in ignore_exprs:
                if re.search(expr, line):
                    match = True
                    break
            if not match:
                return True
        return False

    def run(self):
        self._res_data = {}
        if not is_installed("tcpdump"):
            self._res_data["msg"] = "tcpdump is not installed on this machine!"
            logging.error(self._res_data["msg"])
            return False

        self._prepare_grep_exprs()
        cmd = self._compose_cmd()
        logging.debug("compiled command: {}".format(cmd))

        packet_assert_process = subprocess.Popen(
            cmd,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
        )

        try:
            self.wait_for_interrupt()
        except:
            raise LnstError("Could not handle interrupt properly.")

        with packet_assert_process.stdout, packet_assert_process.stderr:
            stderr = packet_assert_process.stderr.read().decode()
            stdout = packet_assert_process.stdout.read().decode()

        self._res_data["stderr"] = stderr

        if self._is_real_err(stderr):
            self._res_data["msg"] = "errors reported by tcpdump"
            logging.error(self._res_data["msg"])
            logging.error(self._res_data["stderr"])
            return False

        for line in stdout.split("\n"):
            self._check_line(line)

        logging.debug("Capturing finised. Received %d packets." % self._p_recv)
        self._res_data["p_recv"] = self._p_recv

        return True
Example #11
class IperfClient(IperfBase):
    server = HostnameOrIpParam(mandatory=True)
    duration = IntParam(default=10)
    udp = BoolParam(default=False)
    sctp = BoolParam(default=False)
    port = IntParam()
    blksize = IntParam()
    mss = IntParam()
    cpu_bind = IntParam()
    parallel = IntParam()
    opts = StrParam()

    _role = "client"

    def __init__(self, **kwargs):
        super(IperfClient, self).__init__(**kwargs)

        if self.params.udp and self.params.sctp:
            raise TestModuleError(
                "Parameters udp and sctp are mutually exclusive!")

    def runtime_estimate(self):
        _duration_overhead = 5
        return self.params.duration + _duration_overhead

    def _compose_cmd(self):
        port = ""

        if "port" in self.params:
            port = "-p {:d}".format(self.params.port)

        if "blksize" in self.params:
            blksize = "-l {:d}".format(self.params.blksize)
        else:
            blksize = ""

        if "mss" in self.params:
            mss = "-M {:d}".format(self.params.mss)
        else:
            mss = ""

        if "cpu_bind" in self.params:
            cpu = "-A {:d}".format(self.params.cpu_bind)
        else:
            cpu = ""

        if "parallel" in self.params:
            parallel = "-P {:d}".format(self.params.parallel)
        else:
            parallel = ""

        if self.params.udp:
            test = "--udp"
        elif self.params.sctp:
            test = "--sctp"
        elif self.params.mptcp:
            test = "--multipath"
        else:
            test = ""

        cmd = ("iperf3 -c {server} -b 0/1000 -J -t {duration}"
               " {cpu} {test} {mss} {blksize} {parallel} {port}"
               " {opts}".format(
                   server=self.params.server,
                   duration=self.params.duration,
                   cpu=cpu,
                   test=test,
                   mss=mss,
                   blksize=blksize,
                   parallel=parallel,
                   port=port,
                   opts=self.params.opts if "opts" in self.params else ""))

        return cmd
Example #12
class IperfBase(BaseTestModule):
    mptcp = BoolParam(default=False)

    def run(self):
        self._res_data = {}
        if not is_installed("iperf3"):
            self._res_data["msg"] = "Iperf is not installed on this machine!"
            logging.error(self._res_data["msg"])
            return False

        cmd = self._compose_cmd()

        logging.debug("compiled command: %s" % cmd)
        logging.debug("running as {} ...".format(self._role))

        server = subprocess.Popen(cmd,
                                  shell=True,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE,
                                  close_fds=True)

        try:
            stdout, stderr = server.communicate()
            stdout = stdout.decode()
            stderr = stderr.decode()
        except KeyboardInterrupt:
            pass

        try:
            self._res_data["data"] = json.loads(stdout)
        except JSONDecodeError:
            self._res_data["msg"] = "Error while parsing the iperf json output"
            self._res_data["data"] = stdout
            self._res_data["stderr"] = stderr
            logging.error(self._res_data["msg"])
            return False

        try:
            self._check_json_sanity()
        except:
            self._res_data["msg"] = "Iperf provided incomplete json data"
            self._res_data["data"] = stdout
            self._res_data["stderr"] = stderr
            logging.error(self._res_data["msg"])
            return False

        self._res_data["stderr"] = stderr

        if stderr != "":
            self._res_data["msg"] = "errors reported by iperf"
            logging.error(self._res_data["msg"])
            logging.error(self._res_data["stderr"])

        if server.returncode > 0:
            self._res_data["msg"] = "{} returncode = {}".format(
                self._role, server.returncode)
            logging.error(self._res_data["msg"])
            return False

        return True

    def _check_json_sanity(self):
        data = self._res_data["data"]
        if "start" not in data:
            raise Exception()

        if "end" not in data:
            raise Exception()

        if len(data["intervals"]) == 0:
            raise Exception()

        if "streams" not in data["end"]:
            raise Exception()
Example #13
class DisableTurboboostMixin(BaseSubConfigMixin):
    """
    This mixin class is an extension to the :any:`BaseEnrtRecipe` class that can
    be used to disable CPU turboboost on hosts before running the tests.

    Any recipe that wants to use the mixin must define the
    :attr:`disable_turboboost_host_list` property first.

    Note: The mixin uses the intel_pstate sysfs interface to disable the CPU
    feature and so it is usable only on systems with Intel CPUs.

    :param disable_turboboost:
        (optional test parameter) boolean to control the CPU turboboost. When
        the parameter is set to **True**, the CPU turboboost is disabled on all
        hosts defined by the :attr:`disable_turboboost_host_list` property.
        Otherwise this mixin has no effect.
    """

    disable_turboboost = BoolParam(default=False)

    @property
    def disable_turboboost_host_list(self):
        """
        The value of this property is a list of hosts for which the CPU turboboost
        should be turned off. A derived class can override this property.
        """
        return []

    def _is_turboboost_supported(self, host):
        file_check = host.run(
            "ls /sys/devices/system/cpu/intel_pstate/no_turbo",
            job_level=ResultLevel.DEBUG)
        return file_check.passed

    def apply_sub_configuration(self, config):
        super().apply_sub_configuration(config)

        if self.params.disable_turboboost:
            for host in self.disable_turboboost_host_list:
                if self._is_turboboost_supported(host):
                    # TODO: save previous state
                    host.run(
                        "echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo"
                    )

    def generate_sub_configuration_description(self, config):
        description = super().generate_sub_configuration_description(config)

        if self.params.disable_turboboost:
            for host in self.disable_turboboost_host_list:
                if self._is_turboboost_supported(host):
                    description.append(
                        "turboboost disabled through intel_pstate on {}".
                        format(host.hostid))
                else:
                    description.append("warning: user requested to disable turboboost "\
                            "through intel_pstate but the sysfs file is not available "\
                            "on host {}".format(host.hostid))
        else:
            description.append(
                "configuration of turboboost through intel_pstate skipped")

        return description

    def remove_sub_configuration(self, config):
        if self.params.disable_turboboost:
            for host in self.disable_turboboost_host_list:
                if self._is_turboboost_supported(host):
                    # TODO: restore previous state
                    host.run(
                        "echo 0 > /sys/devices/system/cpu/intel_pstate/no_turbo"
                    )

        return super().remove_sub_configuration(config)
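
# A minimal usage sketch (not part of the original listing): a recipe selects
# the hosts that the mixin touches by overriding disable_turboboost_host_list
# and enables it through the disable_turboboost parameter. The host names are
# assumptions for illustration.
class ExampleTurboboostRecipe(DisableTurboboostMixin, BaseEnrtRecipe):
    @property
    def disable_turboboost_host_list(self):
        return [self.matched.host1, self.matched.host2]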
Example #14
class BaseEnrtRecipe(BaseSubConfigMixin, PingTestAndEvaluate, PerfRecipe):
    #common requirements parameters
    driver = StrParam(default="ixgbe")

    #common test parameters
    ip_versions = Param(default=("ipv4", "ipv6"))

    #common ping test params
    ping_parallel = BoolParam(default=False)
    ping_bidirect = BoolParam(default=False)
    ping_count = IntParam(default=100)
    ping_interval = StrParam(default=0.2)
    ping_psize = IntParam(default=None)

    #common perf test params
    perf_tests = Param(default=("tcp_stream", "udp_stream", "sctp_stream"))
    perf_tool_cpu = IntParam(mandatory=False)
    perf_duration = IntParam(default=60)
    perf_iterations = IntParam(default=5)
    perf_parallel_streams = IntParam(default=1)
    perf_msg_sizes = ListParam(default=[123])
    perf_reverse = BoolParam(default=False)

    net_perf_tool = Param(default=IperfFlowMeasurement)
    cpu_perf_tool = Param(default=StatCPUMeasurement)

    def test(self):
        with self._test_wide_context() as main_config:
            for sub_config in self.generate_sub_configurations(main_config):
                with self._sub_context(sub_config) as recipe_config:
                    self.do_tests(recipe_config)

    @contextmanager
    def _test_wide_context(self):
        config = self.test_wide_configuration()
        self.describe_test_wide_configuration(config)
        try:
            yield config
        finally:
            self.test_wide_deconfiguration(config)

    def test_wide_configuration(self):
        return EnrtConfiguration()

    def test_wide_deconfiguration(self, config):
        #TODO check if anything is still applied and throw exception?
        return

    def describe_test_wide_configuration(self, config):
        description = self.generate_test_wide_description(config)
        self.add_result(
            True, "Summary of used Recipe parameters:\n{}".format(
                pprint.pformat(self.params._to_dict())))
        self.add_result(True, "\n".join(description))

    def generate_test_wide_description(self, config):
        return [
            "Testwide configuration for recipe {} description:".format(
                self.__class__.__name__)
        ]

    @contextmanager
    def _sub_context(self, config):
        self.apply_sub_configuration(config)
        self.describe_sub_configuration(config)
        try:
            yield config
        finally:
            self.remove_sub_configuration(config)

    def describe_sub_configuration(self, config):
        description = self.generate_sub_configuration_description(config)
        self.add_result(True, "\n".join(description))

    def generate_sub_configuration_description(self, config):
        return ["Sub configuration description:"]

    def do_tests(self, recipe_config):
        self.do_ping_tests(recipe_config)
        self.do_perf_tests(recipe_config)

    def do_ping_tests(self, recipe_config):
        for ping_config in self.generate_ping_configurations(recipe_config):
            result = self.ping_test(ping_config)
            self.ping_evaluate_and_report(ping_config, result)

    def do_perf_tests(self, recipe_config):
        for perf_config in self.generate_perf_configurations(recipe_config):
            result = self.perf_test(perf_config)
            self.perf_report_and_evaluate(result)

    def generate_ping_configurations(self, config):
        for endpoint1, endpoint2 in self.generate_ping_endpoints(config):
            for ipv in self.params.ip_versions:
                ip_filter = {}
                if ipv == "ipv4":
                    ip_filter.update(family=AF_INET)
                elif ipv == "ipv6":
                    ip_filter.update(family=AF_INET6)
                    ip_filter.update(is_link_local=False)

                endpoint1_ips = endpoint1.ips_filter(**ip_filter)
                endpoint2_ips = endpoint2.ips_filter(**ip_filter)

                if len(endpoint1_ips) != len(endpoint2_ips):
                    raise LnstError(
                        "Source/destination ip lists are of different size.")

                ping_conf_list = []
                for src_addr, dst_addr in zip(endpoint1_ips, endpoint2_ips):
                    pconf = PingConf(
                        client=endpoint1.netns,
                        client_bind=src_addr,
                        destination=endpoint2.netns,
                        destination_address=dst_addr,
                        count=self.params.ping_count,
                        interval=self.params.ping_interval,
                        size=self.params.ping_psize,
                    )

                    ping_conf_list.append(pconf)

                    if self.params.ping_bidirect:
                        ping_conf_list.append(self._create_reverse_ping(pconf))

                    if not self.params.ping_parallel:
                        break

                yield ping_conf_list

    def generate_ping_endpoints(self, config):
        return []

    def generate_perf_configurations(self, config):
        for flows in self.generate_flow_combinations(config):
            perf_recipe_conf = dict(
                recipe_config=config,
                flows=flows,
            )

            flows_measurement = self.params.net_perf_tool(
                flows, perf_recipe_conf)

            cpu_measurement_hosts = set()
            for flow in flows:
                cpu_measurement_hosts.add(flow.generator)
                cpu_measurement_hosts.add(flow.receiver)

            cpu_measurement = self.params.cpu_perf_tool(
                cpu_measurement_hosts,
                perf_recipe_conf,
            )

            perf_conf = PerfRecipeConf(
                measurements=[cpu_measurement, flows_measurement],
                iterations=self.params.perf_iterations,
            )

            perf_conf.register_evaluators(cpu_measurement,
                                          self.cpu_perf_evaluators)
            perf_conf.register_evaluators(flows_measurement,
                                          self.net_perf_evaluators)

            yield perf_conf

    def generate_flow_combinations(self, config):
        for client_nic, server_nic in self.generate_perf_endpoints(config):
            for ipv in self.params.ip_versions:
                if ipv == "ipv4":
                    family = AF_INET
                elif ipv == "ipv6":
                    family = AF_INET6

                client_bind = client_nic.ips_filter(family=family)[0]
                server_bind = server_nic.ips_filter(family=family)[0]

                for perf_test in self.params.perf_tests:
                    for size in self.params.perf_msg_sizes:
                        flow = PerfFlow(
                            type=perf_test,
                            generator=client_nic.netns,
                            generator_bind=client_bind,
                            receiver=server_nic.netns,
                            receiver_bind=server_bind,
                            msg_size=size,
                            duration=self.params.perf_duration,
                            parallel_streams=self.params.perf_parallel_streams,
                            cpupin=self.params.perf_tool_cpu
                            if "perf_tool_cpu" in self.params else None)
                        yield [flow]

                        if self.params.perf_reverse:
                            reverse_flow = self._create_reverse_flow(flow)
                            yield [reverse_flow]

    def generate_perf_endpoints(self, config):
        return []

    @property
    def cpu_perf_evaluators(self):
        return []

    @property
    def net_perf_evaluators(self):
        return [NonzeroFlowEvaluator()]

    def _create_reverse_flow(self, flow):
        rev_flow = PerfFlow(type=flow.type,
                            generator=flow.receiver,
                            generator_bind=flow.receiver_bind,
                            receiver=flow.generator,
                            receiver_bind=flow.generator_bind,
                            msg_size=flow.msg_size,
                            duration=flow.duration,
                            parallel_streams=flow.parallel_streams,
                            cpupin=flow.cpupin)
        return rev_flow

    def _create_reverse_ping(self, pconf):
        return PingConf(
            client=pconf.destination,
            client_bind=pconf.destination_address,
            destination=pconf.client,
            destination_address=pconf.client_bind,
            count=pconf.ping_count,
            interval=pconf.ping_interval,
            size=pconf.ping_psize,
        )