    def _from_target(cls,
                     target: Target,
                     *,
                     res_dir: ArtifactPath = None,
                     collector=None) -> 'EASBehaviour':
        """
        :meta public:

        Factory method to create a bundle using a live target.

        This will execute the rt-app workload described in
        :meth:`lisa.tests.base.RTATestBundle.get_rtapp_profile`.
        """
        plat_info = target.plat_info
        rtapp_profile = cls.get_rtapp_profile(plat_info)

        # EAS doesn't make a lot of sense without schedutil,
        # so make sure this is what's being used
        with target.disable_idle_states():
            with target.cpufreq.use_governor("schedutil"):
                cls.run_rtapp(target,
                              res_dir,
                              rtapp_profile,
                              collector=collector)

        return cls(res_dir, plat_info)
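As a quick usage note for this factory: end users go through the public from_target() wrapper rather than calling _from_target() directly. A minimal sketch, assuming a concrete EAS bundle subclass such as OneSmallTask and its import path:

from lisa.target import Target
from lisa.tests.scheduler.eas_behaviour import OneSmallTask  # assumed import path

target = Target.from_default_conf()
# from_target() ends up calling the _from_target() factory above, which runs
# the rt-app workload under the schedutil governor and collects the artifacts.
bundle = OneSmallTask.from_target(target)
print(bundle.res_dir)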
Example #2
    def _from_target(
        cls,
        target: Target,
        *,
        res_dir: ArtifactPath = None,
        collector=None,
    ) -> 'UtilTrackingBase':
        plat_info = target.plat_info
        rtapp_profile = cls.get_rtapp_profile(plat_info)

        # After a bit of experimenting, it turns out that on some platforms
        # misprediction of the idle time (which leads to a shallow idle state,
        # a wakeup and another idle nap) can mess up the duty cycle of the
        # rt-app task we're running. In our case, a 50% duty cycle, 16ms period
        # task would always be active for 8ms, but it would sometimes sleep for
        # only 5 or 6 ms.
        # Disabling idle states here is fine, as we only care about the proper
        # behaviour of the signal on running/not-running tasks.
        with target.disable_idle_states():
            with target.cpufreq.use_governor('performance'):
                cls.run_rtapp(target,
                              res_dir,
                              rtapp_profile,
                              collector=collector)

        return cls(res_dir, plat_info)
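A tiny sanity check of the numbers quoted in the comment above, just to make the duty-cycle arithmetic explicit:

# 50% duty cycle over a 16ms period: 8ms running, 8ms sleeping per period.
duty_cycle_pct = 50
period_ms = 16
active_ms = period_ms * duty_cycle_pct / 100   # -> 8.0
sleep_ms = period_ms - active_ms               # -> 8.0; a misprediction shrinks this to ~5-6ms
print(active_ms, sleep_ms)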
Example #3
    def _from_target(cls, target: Target, *, res_dir: ArtifactPath = None, seed=None,
                     nr_operations=100, sleep_min_ms=10, sleep_max_ms=100,
                     max_cpus_off=sys.maxsize) -> 'HotplugBase':
        """
        :param seed: Seed of the RNG used to create the hotplug sequences
        :type seed: int

        :param nr_operations: Number of operations in the sequence
        :type nr_operations: int

        :param sleep_min_ms: Minimum sleep duration between hotplug operations
        :type sleep_min_ms: int

        :param sleep_max_ms: Maximum sleep duration between hotplug operations
          (0 would lead to no sleep)
        :type sleep_max_ms: int

        :param max_cpus_off: Maximum number of CPUs hotplugged out at any given
          moment
        :type max_cpus_off: int
        """

        # Instantiate a generator so we can change the seed without any global
        # effect
        random_gen = random.Random()
        random_gen.seed(seed)

        target.hotplug.online_all()
        hotpluggable_cpus = target.hotplug.list_hotpluggable_cpus()

        sequence = list(cls.cpuhp_seq(
            nr_operations, hotpluggable_cpus, max_cpus_off, random_gen))

        cls._check_cpuhp_seq_consistency(nr_operations, hotpluggable_cpus,
            max_cpus_off, sequence)

        script = cls._cpuhp_script(
            target, res_dir, sequence, sleep_min_ms, sleep_max_ms, random_gen)

        script.push()

        # We don't want a timeout but we do want to detect if/when the target
        # stops responding. So start a background shell and poll on it
        try:
            # Using DEVNULL is important to prevent the command from blocking
            # on its outputs
            with script.background(as_root=True, stdout=DEVNULL, stderr=DEVNULL) as bg:
                while bg.poll() is None:
                    if not script.target.check_responsive():
                        break

                    sleep(0.1)
        finally:
            target_alive = bool(script.target.check_responsive())
            target.hotplug.online_all()

        live_cpus = target.list_online_cpus() if target_alive else []

        return cls(target.plat_info, target_alive, hotpluggable_cpus, live_cpus)
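For illustration only, here is a sketch of the kind of random sequence generator that cpuhp_seq() hooks into. The (cpu, online) tuple format and the helper name are assumptions made for this sketch, not the actual contract of cpuhp_seq():

import random

def random_cpuhp_seq(nr_operations, hotpluggable_cpus, max_cpus_off, random_gen):
    """Yield hypothetical (cpu, online) pairs, keeping at most max_cpus_off CPUs offline."""
    offline = set()
    for _ in range(nr_operations):
        can_unplug = [cpu for cpu in hotpluggable_cpus if cpu not in offline]
        # Plug a CPU back in if we hit the offline limit, ran out of candidates,
        # or simply on a coin flip; otherwise hotplug another CPU out.
        if offline and (len(offline) >= max_cpus_off or not can_unplug
                        or random_gen.random() < 0.5):
            cpu = random_gen.choice(sorted(offline))
            offline.remove(cpu)
            yield (cpu, True)
        else:
            cpu = random_gen.choice(can_unplug)
            offline.add(cpu)
            yield (cpu, False)

# Example: list(random_cpuhp_seq(10, [1, 2, 3], 2, random.Random(42)))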
Example #4
    def test_cli(self):
        """
        Test that creating a Target from the CLI works
        """
        args = "--kind host --password 'foobar'"
        target = Target.from_cli(shlex.split(args))

        self.assertIsNotNone(target.os)
Example #5
    def from_target(cls,
                    target: Target,
                    *,
                    res_dir: ArtifactPath = None,
                    **kwargs):
        """
        Factory method to create a bundle using a live target

        :param target: Target to connect to.
        :type target: lisa.target.Target

        :param res_dir: Host result directory holding artifacts.
        :type res_dir: str or lisa.utils.ArtifactPath

        This is mostly boilerplate code around
        :meth:`~lisa.tests.base.TestBundle._from_target`, which lets us
        introduce common functionality for subclasses. Unless you know
        what you are doing, you should not override this method; override the
        internal :meth:`lisa.tests.base.TestBundle._from_target` instead.
        """
        cls.check_from_target(target)

        res_dir = res_dir or target.get_res_dir(
            name=cls.__qualname__,
            symlink=True,
        )

        # Make sure that all the relevant dmesg warnings will fire when running
        # things on the target, even if we already hit some warn_once warnings.
        with contextlib.suppress(TargetStableError):
            target.write_value('/sys/kernel/debug/clear_warn_once',
                               '1',
                               verify=False)

        bundle = cls._from_target(target, res_dir=res_dir, **kwargs)

        # We've created the bundle from the target, and have all of
        # the information we need to execute the test code. However,
        # we enforce the use of the offline reloading path to ensure
        # it does not get broken.
        if cls.VERIFY_SERIALIZATION:
            bundle.to_dir(res_dir)
            # Updating the res_dir breaks deserialization for some use cases
            bundle = cls.from_dir(res_dir, update_res_dir=False)

        return bundle
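To make the intended split of responsibilities concrete, here is a minimal sketch of a subclass: the public from_target() above does the bookkeeping, while subclasses only implement _from_target(). The class name and the collected kernel_version attribute are purely illustrative, and the (res_dir, plat_info) constructor is assumed from how the examples above call cls():

from lisa.target import Target
from lisa.tests.base import TestBundle
from lisa.utils import ArtifactPath

class MyBundle(TestBundle):
    def __init__(self, res_dir, plat_info, kernel_version):
        super().__init__(res_dir, plat_info)
        self.kernel_version = kernel_version

    @classmethod
    def _from_target(cls, target: Target, *, res_dir: ArtifactPath = None) -> 'MyBundle':
        # Collect whatever the test needs while the target is live
        kernel_version = target.execute('uname -r').strip()
        return cls(res_dir, target.plat_info, kernel_version)

# bundle = MyBundle.from_target(target)  # goes through the boilerplate above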
Example #6
def main():
    target = Target.from_cli()
    plat_info = target.plat_info

    # Make sure we get all the information we can, even if it means running for
    # a bit longer. RTapp calibration will be computed as it is a DeferredValue
    plat_info.eval_deferred()

    print(plat_info)

    path = 'plat_info.yml'
    plat_info.to_yaml_map(path)
    print('\nPlatform info written to: {}'.format(path))
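The generated YAML map can later be consumed without a live target; a small sketch, assuming the matching from_yaml_map() loader on PlatformInfo:

from lisa.platforms.platinfo import PlatformInfo

plat_info = PlatformInfo.from_yaml_map('plat_info.yml')  # assumed loader API
print(plat_info)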
Example #7
    def _from_target(cls,
                     target: Target,
                     *,
                     res_dir: ArtifactPath = None,
                     ftrace_coll: FtraceCollector = None,
                     cpu=None,
                     nr_steps=1) -> 'LargeStepUp':
        plat_info = target.plat_info

        # Use a big CPU by default to allow maximum range of utilization
        cpu = cpu if cpu is not None else plat_info["capacity-classes"][-1][0]

        rtapp_profile = cls.get_rtapp_profile(plat_info, cpu=cpu, nr_steps=nr_steps)

        # Disable idle states to ensure an accurate duty cycle and avoid idle
        # state misprediction on some boards. This helps keep execution
        # predictable.
        with target.disable_idle_states():
            with target.cpufreq.use_governor("schedutil"):
                cls.run_rtapp(target, res_dir, rtapp_profile, ftrace_coll=ftrace_coll)

        return cls(res_dir, plat_info, cpu, nr_steps)
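To make the "capacity-classes" indexing above explicit, here is a small worked sketch with a made-up 4+4 big.LITTLE layout (CPU groups ordered from lowest to highest capacity):

capacity_classes = [[0, 1, 2, 3], [4, 5, 6, 7]]  # hypothetical plat_info["capacity-classes"]

big_cpus = capacity_classes[-1]   # highest-capacity group
cpu = big_cpus[0]                 # first big CPU, i.e. the default picked above
print(cpu)                        # -> 4 on this hypothetical system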
Example #8
def main():
    params = {
        'plat-info': dict(help='Path to the PlatformInfo file to generate', )
    }
    args, target = Target.from_custom_cli(params=params)
    plat_info = target.plat_info

    # Make sure we get all the information we can, even if it means running for
    # a bit longer. RTapp calibration will be computed as it is a DeferredValue
    plat_info.eval_deferred(error='log')

    print(plat_info)

    path = args.plat_info or f'{target.name}.plat_info.yml'
    plat_info.to_yaml_map(path)
    print(f'\nPlatform info written to: {path}')
Example #9
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2018, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from lisa.target import Target
from devlib.module.sched import SchedDomain

target = Target.from_cli()

sd_info = target.sched.get_sd_info()

for cpuid, cpu in sd_info.cpus.items():
    print("== CPU{} ==".format(cpuid))
    for domain in cpu.domains.values():
        print("\t{} level".format(domain.name))
        for flag in domain.flags:
            print("\t\t{} - {}".format(flag.name, flag.__doc__))
Example #10
    def _from_target(cls,
                     target: Target,
                     *,
                     res_dir: ArtifactPath = None,
                     seed=None,
                     nr_operations=100,
                     sleep_min_ms=10,
                     sleep_max_ms=100,
                     max_cpus_off=sys.maxsize) -> 'HotplugBase':
        """
        :param seed: Seed of the RNG used to create the hotplug sequences
        :type seed: int

        :param nr_operations: Number of operations in the sequence
        :type nr_operations: int

        :param sleep_min_ms: Minimum sleep duration between hotplug operations
        :type sleep_min_ms: int

        :param sleep_max_ms: Maximum sleep duration between hotplug operations
          (0 would lead to no sleep)
        :type sleep_max_ms: int

        :param max_cpus_off: Maximum number of CPUs hotplugged out at any given
          moment
        :type max_cpus_off: int
        """

        # Instantiate a generator so we can change the seed without any global
        # effect
        random_gen = random.Random()
        random_gen.seed(seed)

        target.hotplug.online_all()
        hotpluggable_cpus = target.hotplug.list_hotpluggable_cpus()

        sequence = list(
            cls.cpuhp_seq(nr_operations, hotpluggable_cpus, max_cpus_off,
                          random_gen))

        cls._check_cpuhp_seq_consistency(nr_operations, hotpluggable_cpus,
                                         max_cpus_off, sequence)

        do_hotplug = cls._cpuhp_func(target, res_dir, sequence, sleep_min_ms,
                                     sleep_max_ms, random_gen)

        # We don't want a timeout but we do want to detect if/when the target
        # stops responding. So handle the hotplug remote func in a separate
        # thread and keep polling the target
        thread = Thread(target=do_hotplug, daemon=True)
        try:
            thread.start()
            while thread.is_alive():
                # We might have a thread hanging off in that case, but there is
                # not much we can do since the remote func cannot really be
                # canceled. Since it was spawned with a timeout, it will
                # eventually die.
                if not target.check_responsive():
                    break
                sleep(0.1)
        finally:
            target_alive = bool(target.check_responsive())
            target.hotplug.online_all()

        live_cpus = target.list_online_cpus() if target_alive else []
        return cls(target.plat_info, target_alive, hotpluggable_cpus,
                   live_cpus)
Example #11
    def _from_target(
            cls,
            target: Target,
            *,
            res_dir: ArtifactPath,
            ftrace_coll: FtraceCollector = None) -> 'ExampleTestBundle':
        """
        This class method is the main way of creating a :class:`ExampleTestBundle`.

        It takes a first (positional) ``target`` parameter, which is a live
        :class:`lisa.target.Target` object. It can be used to manipulate a
        remote device such as a development board, to run workloads on it,
        manipulate sysfs entries and so on.

        **All other parameters are keyword-only**, which means they must appear
        after the lone ``*`` in the parameter list.

        ``res_dir`` stands for "result directory" and is a location where the
        bundle can store some artifacts collected from the target. The bundle
        can rely on that folder being populated by this method.

        The "'ExampleTestBundle'" return annotation tells the test runner that
        this class method acts as a factory of :class:`ExampleTestBundle`, so it
        will be used to assemble the test case.

        .. seealso:: The class :class:`lisa.platforms.platinfo.PlatformInfo`
            provides information about a device that is usually needed in
            tests.

        .. seealso:: :meth:`lisa.tests.base.RTATestBundle.run_rtapp` provides
            an easy way of running an rt-app workload on the target device.
        """
        # PlatformInfo
        # https://lisa-linux-integrated-system-analysis.readthedocs.io/en/master/target.html#lisa.platforms.platinfo.PlatformInfo
        #
        # It's a central piece of LISA: it holds all the information about a
        # given device. Use it to access any data it contains rather than
        # fetching it yourself, as the final user will have ways of
        # providing values in case auto-detection fails, and logging of all the
        # data it contains is provided out of the box.
        plat_info = target.plat_info

        # The rt-app profile defines the rt-app workload that will be run
        # note: If None is given to run_rtapp(), it will default to calling
        # get_rtapp_profile()
        rtapp_profile = cls.get_rtapp_profile(plat_info)

        # Here, we want to make sure the cpufreq governor is schedutil, since
        # that's what we want to test. This is achieved through the use of
        # devlib modules:
        # https://devlib.readthedocs.io/en/latest/modules.html
        with target.cpufreq.use_governor("schedutil"):
            # RTATestBundle.run_rtapp()
            # https://lisa-linux-integrated-system-analysis.readthedocs.io/en/master/kernel_tests.html#lisa.tests.base.RTATestBundle.run_rtapp
            #
            # It allows running the rt-app profile on the target. ftrace_coll
            # is the object used to control the recording of the trace, and is
            # set up by the test runner. This allows the final user to extend
            # the list of ftrace events collected. If no collector is provided,
            # a default one will be created by run_rtapp() based on the
            # @requires_events() decorators used on methods of that
            # ExampleTestBundle. Note that it will also freeze all the tasks on
            # the target device, so that the scheduler signals are not
            # disturbed. Some critical tasks are not frozen though.
            cls.run_rtapp(target,
                          res_dir,
                          rtapp_profile,
                          ftrace_coll=ftrace_coll)

        # Execute a silly shell command on the target device as well
        output = target.execute('echo $((21+21))').split()

        # Logging must be done through the provided logger, so it integrates well in LISA.
        cls.get_logger().info('Finished doing stuff')

        # Actually create a ExampleTestBundle by calling the class.
        return cls(res_dir, plat_info, output)
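A minimal sketch of how this bundle is typically driven from a script; Target.from_default_conf() is just one way of getting a live target, and the offline reload at the end mirrors the VERIFY_SERIALIZATION round-trip shown in the from_target() boilerplate earlier:

from lisa.target import Target

target = Target.from_default_conf()
# Runs the rt-app workload above and gives back a fully populated bundle
bundle = ExampleTestBundle.from_target(target)

# The bundle can later be reloaded offline from its result directory
# (assuming the bundle exposes its res_dir, as the from_dir() round-trip implies)
bundle = ExampleTestBundle.from_dir(bundle.res_dir)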
Example #12
File: utils.py Project: qais-yousef/lisa
def create_local_target():
    """
    :returns: A localhost :class:`lisa.target.Target` instance
    """
    return Target.from_conf(conf=HOST_TARGET_CONF, plat_info=HOST_PLAT_INFO)
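For reference, a sketch of what HOST_TARGET_CONF might look like; the exact keys are an assumption inferred from the '--kind host' CLI usage shown earlier, not the project's actual definition:

from lisa.target import TargetConf

HOST_TARGET_CONF = TargetConf({'kind': 'host'})  # assumed conf content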
Example #13
    def _from_target(cls,
                     target: Target,
                     *,
                     res_dir: ArtifactPath = None,
                     seed=None,
                     nr_operations=100,
                     sleep_min_ms=10,
                     sleep_max_ms=100,
                     max_cpus_off=sys.maxsize) -> 'HotplugBase':
        """
        :param seed: Seed of the RNG used to create the hotplug sequences
        :type seed: int

        :param nr_operations: Number of operations in the sequence
        :type nr_operations: int

        :param sleep_min_ms: Minimum sleep duration between hotplug operations
        :type sleep_min_ms: int

        :param sleep_max_ms: Maximum sleep duration between hotplug operations
          (0 would lead to no sleep)
        :type sleep_max_ms: int

        :param max_cpus_off: Maximum number of CPUs hotplugged out at any given
          moment
        :type max_cpus_off: int
        """

        # Instantiate a generator so we can change the seed without any global
        # effect
        random_gen = random.Random()
        random_gen.seed(seed)

        target.hotplug.online_all()
        hotpluggable_cpus = target.hotplug.list_hotpluggable_cpus()

        sequence = list(
            cls.cpuhp_seq(nr_operations, hotpluggable_cpus, max_cpus_off,
                          random_gen))

        cls._check_cpuhp_seq_consistency(nr_operations, hotpluggable_cpus,
                                         max_cpus_off, sequence)

        script = cls._cpuhp_script(target, res_dir, sequence, sleep_min_ms,
                                   sleep_max_ms, random_gen)

        script.push()

        # We don't want a timeout but we do want to detect if/when the target
        # stops responding. So start a background shell and poll on it
        with script.background(as_root=True):
            try:
                script.wait()

                target_alive = True
                target.hotplug.online_all()
            except TargetNotRespondingError:
                target_alive = False

        live_cpus = target.list_online_cpus() if target_alive else []

        return cls(target.plat_info, target_alive, hotpluggable_cpus,
                   live_cpus)
Example #14
import logging
import os
from lisa.trace import FtraceCollector, Trace
from lisa.utils import setup_logging
from lisa.target import Target, TargetConf
from lisa.wlgen.rta import RTA, Periodic
from lisa.datautils import df_filter_task_ids
import pandas as pd

setup_logging()
target = Target.from_one_conf('conf/lisa/qemu_target_default.yml')
#target = Target.from_default_conf()

rtapp_profile = {}
tasks = []
for cpu in range(4):
    tasks.append("tsk{}-{}".format(cpu, cpu))
    rtapp_profile["tsk{}".format(cpu)] = Periodic(duty_cycle_pct=50,
                                                  duration_s=120)

wload = RTA.by_profile(target, "experiment_wload", rtapp_profile)

ftrace_coll = FtraceCollector(target, events=["sched_switch"])
trace_path = os.path.join(wload.res_dir, "trace.dat")
with ftrace_coll:
    wload.run()

ftrace_coll.get_trace(trace_path)
trace = Trace(trace_path, target.plat_info, events=["sched_switch"])

# sched_switch __comm  __pid  __cpu  __line prev_comm  prev_pid  prev_prio  prev_state next_comm  next_pid  next_prio
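A possible continuation sketch: load the sched_switch events into a DataFrame and keep only the rt-app tasks created above. The df_event() accessor name and the df_filter_task_ids() column arguments are assumptions (they vary across LISA versions), so adjust as needed:

from lisa.trace import TaskID  # assumed location of TaskID

df = trace.df_event("sched_switch")  # older LISA versions: trace.df_events(...)
task_ids = [TaskID(pid=None, comm=comm) for comm in tasks]
df_tasks = df_filter_task_ids(df, task_ids, pid_col='next_pid', comm_col='next_comm')
print(df_tasks.head())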