Example #1
File: base.py Project: keroles/lisa
    def run_rtapp(cls,
                  target,
                  res_dir,
                  profile=None,
                  ftrace_coll=None,
                  cg_cfg=None):
        """
        Run the given RTA profile on the target, and collect an ftrace trace.

        :param target: Target to execute the workload on.
        :type target: lisa.target.Target

        :param res_dir: Folder where the artifacts will be stored.
        :type res_dir: str or lisa.utils.ArtifactPath

        :param profile: ``rt-app`` profile, as a dictionary of
            ``dict(task_name, RTATask)``. If ``None``,
            :meth:`~lisa.tests.base.RTATestBundle.get_rtapp_profile` is called
            with ``target.plat_info``.
        :type profile: dict(str, lisa.wlgen.rta.RTATask)

        :param ftrace_coll: Ftrace collector to use to record the trace. This
            allows recording extra events compared to the default one, which is
            based on the ``ftrace_conf`` class attribute.
        :type ftrace_coll: lisa.trace.FtraceCollector

        :param cg_cfg: CGroup configuration dictionary. If ``None``,
            :meth:`lisa.tests.base.RTATestBundle.get_cgroup_configuration` is
            called with ``target.plat_info``.
        :type cg_cfg: dict
        """

        trace_path = ArtifactPath.join(res_dir, cls.TRACE_PATH)
        dmesg_path = ArtifactPath.join(res_dir, cls.DMESG_PATH)
        ftrace_coll = ftrace_coll or FtraceCollector.from_conf(
            target, cls.ftrace_conf)
        dmesg_coll = DmesgCollector(target)

        profile = profile or cls.get_rtapp_profile(target.plat_info)
        cg_cfg = cg_cfg or cls.get_cgroup_configuration(target.plat_info)

        wload = RTA.by_profile(target,
                               "rta_{}".format(cls.__name__.lower()),
                               profile,
                               res_dir=res_dir)
        cgroup = cls._target_configure_cgroup(target, cg_cfg)
        as_root = cgroup is not None

        # Pre-hit the calibration information, in case this is a lazy value.
        # This avoids polluting the trace and the dmesg output with the
        # calibration tasks. Since we know that rt-app will always need it for
        # anything useful, it's reasonable to do it here.
        target.plat_info['rtapp']['calib']

        with dmesg_coll, ftrace_coll, target.freeze_userspace():
            wload.run(cgroup=cgroup, as_root=as_root)

        ftrace_coll.get_trace(trace_path)
        dmesg_coll.get_trace(dmesg_path)
        return trace_path
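All of these snippets use ArtifactPath.join to anchor well-known artifact names under the result directory. A minimal sketch of the pattern (hypothetical file names; assuming res_dir is an ArtifactPath handed out by LISA, with join presumably behaving like os.path.join while keeping track of the artifact root):

    trace_path = ArtifactPath.join(res_dir, 'trace.dat')   # hypothetical artifact name
    dmesg_path = ArtifactPath.join(res_dir, 'dmesg.log')   # hypothetical artifact name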
Example #2
    def _run_rtapp(cls,
                   target,
                   res_dir,
                   profile,
                   ftrace_coll=None,
                   cg_cfg=None):
        wload = RTA.by_profile(target,
                               "rta_{}".format(cls.__name__.lower()),
                               profile,
                               res_dir=res_dir)

        trace_path = ArtifactPath.join(res_dir, cls.TRACE_PATH)
        dmesg_path = ArtifactPath.join(res_dir, cls.DMESG_PATH)
        ftrace_coll = ftrace_coll or FtraceCollector.from_conf(
            target, cls.ftrace_conf)
        dmesg_coll = DmesgCollector(target)

        cgroup = cls._target_configure_cgroup(target, cg_cfg)
        as_root = cgroup is not None

        with dmesg_coll, ftrace_coll, target.freeze_userspace():
            wload.run(cgroup=cgroup, as_root=as_root)

        ftrace_coll.get_trace(trace_path)
        dmesg_coll.get_trace(dmesg_path)
        return trace_path
Example #3
    def _plot_util(self):
        trace = self.trace
        analysis = trace.analysis.load_tracking
        fig, axes = analysis.setup_plot(nrows=len(self.rtapp_tasks))
        for task, axis in zip(self.rtapp_tasks, axes):
            analysis.plot_task_signals(task, signals=['util'], axis=axis)
            trace.analysis.rta.plot_phases(task,
                                           axis=axis,
                                           wlgen_profile=self.rtapp_profile)

            activation_axis = axis.twinx()
            trace.analysis.tasks.plot_task_activation(task,
                                                      duty_cycle=True,
                                                      overlay=True,
                                                      alpha=0.2,
                                                      axis=activation_axis)

            df_activations = trace.analysis.tasks.df_task_activation(task)
            df_util = analysis.df_task_signal(task, 'util')

            def compute_means(row):
                start = row.name
                end = start + row['duration']
                phase_activations = df_window(df_activations, (start, end))
                phase_util = df_window(df_util, (start, end))
                series = pd.Series({
                    'Phase duty cycle average':
                    series_mean(phase_activations['duty_cycle']),
                    'Phase util tunnel average':
                    kernel_util_mean(
                        phase_util['util'],
                        plat_info=self.plat_info,
                    ),
                })
                return series

            df_means = trace.analysis.rta.df_phases(task).apply(compute_means,
                                                                axis=1)
            df_means = series_refit_index(df_means, window=trace.window)
            df_means['Phase duty cycle average'].plot(drawstyle='steps-post',
                                                      ax=activation_axis)
            df_means['Phase util tunnel average'].plot(drawstyle='steps-post',
                                                       ax=axis)
            activation_axis.legend()
            axis.legend()

        filepath = ArtifactPath.join(self.res_dir, 'tasks_util.png')
        analysis.save_plot(fig, filepath=filepath)

        filepath = ArtifactPath.join(self.res_dir, 'cpus_util.png')
        cpus = sorted(self.cpus)
        analysis.plot_cpus_signals(cpus, signals=['util'], filepath=filepath)
Example #4
    def _plot_util(self):
        analysis = self.trace.analysis.load_tracking
        fig, axes = analysis.setup_plot(nrows=len(self.rtapp_tasks))
        for task, axis in zip(self.rtapp_tasks, axes):
            analysis.plot_task_signals(task, signals=['util'], axis=axis)
            self.trace.analysis.rta.plot_phases(task, axis=axis)

        filepath = ArtifactPath.join(self.res_dir, 'tasks_util.png')
        analysis.save_plot(fig, filepath=filepath)

        filepath = ArtifactPath.join(self.res_dir, 'cpus_util.png')
        cpus = sorted(self.cpus)
        analysis.plot_cpus_signals(cpus, signals=['util'], filepath=filepath)
Example #5
    def _early_init(self,
                    target,
                    name,
                    res_dir,
                    json_file,
                    log_stats=False,
                    trace_events=None):
        """
        Initialize everything that is not related to the contents of the JSON file.
        """
        super().__init__(target, name, res_dir)
        self.log_stats = log_stats
        self.trace_events = trace_events or []

        if not json_file:
            json_file = f'{self.name}.json'

        self.local_json = ArtifactPath.join(self.res_dir, json_file)
        self.remote_json = self.target.path.join(self.run_dir, json_file)

        rta_cmd = self.target.which('rt-app')
        if not rta_cmd:
            raise RuntimeError("No rt-app executable found on the target")

        self.command = f'{quote(rta_cmd)} {quote(self.remote_json)} 2>&1'
Example #6
    def reset(self):
        """
        Reset energy meter and start sampling from channels specified in the
        target configuration.
        """
        logger = self.get_logger()
        # Terminate already running iio-capture instances (if any)
        wait_for_termination = 0
        for proc in psutil.process_iter():
            if self._iiocapturebin not in proc.cmdline():
                continue
            for channel in self._channels:
                if self._iio_device(channel) in proc.cmdline():
                    logger.debug('Killing previous iio-capture for {}'.format(
                                 self._iio_device(channel)))
                    logger.debug(proc.cmdline())
                    proc.kill()
                    wait_for_termination = 2

        # Wait for previous instances to be killed
        sleep(wait_for_termination)

        # Start iio-capture for all channels required
        for channel in self._channels:
            ch_id = self._channels[channel]

            # Setup CSV file to collect samples for this channel
            csv_file = ArtifactPath.join(self._res_dir, 'samples_{}.csv'.format(channel))

            # Start a dedicated iio-capture instance for this channel
            self._iio[ch_id] = Popen(['stdbuf', '-i0', '-o0', '-e0',
                                      self._iiocapturebin, '-n',
                                      self._hostname, '-o',
                                      '-c', '-f',
                                      str(csv_file),
                                      self._iio_device(channel)],
                                     stdout=PIPE, stderr=STDOUT,
                                     universal_newlines=True)

        # Wait some time before checking if there is any output
        sleep(1)

        # Check that all required channels have been started
        for channel in self._channels:
            ch_id = self._channels[channel]

            self._iio[ch_id].poll()
            if self._iio[ch_id].returncode:
                logger.error('Failed to run {} for {}'.format(
                    self._iiocapturebin, self._str(channel)))
                logger.warning(
                    'Make sure there are no iio-capture processes '
                    'connected to {} and device {}'.format(
                        self._hostname, self._str(channel)))
                out, _ = self._iio[ch_id].communicate()
                logger.error('Output: {}'.format(out.strip()))
                self._iio[ch_id] = None
                raise RuntimeError('iio-capture connection error')

            logger.debug('Started {} on {}...'.format(
                self._iiocapturebin, self._str(channel)))
        self.reset_time = time.monotonic()
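One caveat with the process scan above: psutil can race with process exit, so a process yielded by process_iter() may be gone by the time cmdline() is called, raising psutil.NoSuchProcess. A slightly more defensive variant of the loop (a sketch, same logic otherwise):

    for proc in psutil.process_iter():
        try:
            cmdline = proc.cmdline()
        except psutil.NoSuchProcess:
            # The process died between iteration and inspection
            continue
        if self._iiocapturebin not in cmdline:
            continue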
Example #7
    def _from_target(cls,
                     target: Target,
                     *,
                     res_dir: ArtifactPath = None,
                     freq_count_limit=5) -> 'UserspaceSanity':
        """
        Factory method to create a bundle using a live target

        :param freq_count_limit: The maximum amount of frequencies to test
        :type freq_count_limit: int

        This will run Sysbench at different frequencies using the userspace
        governor
        """
        sanity_items = []

        dmesg_path = ArtifactPath.join(res_dir, cls.DMESG_PATH)
        dmesg_coll = DmesgCollector(target)

        plat_info = target.plat_info
        with dmesg_coll, target.cpufreq.use_governor("userspace"):
            for domain in plat_info['freq-domains']:
                cpu = domain[0]
                freqs = plat_info['freqs'][cpu]

                if len(freqs) > freq_count_limit:
                    freqs = freqs[::len(freqs) // freq_count_limit +
                                  (1 if len(freqs) % 2 else 0)]

                for freq in freqs:
                    item_res_dir = ArtifactPath.join(res_dir,
                                                     f'CPU{cpu}@{freq}')
                    os.makedirs(item_res_dir)
                    item = UserspaceSanityItem.from_target(
                        target=target,
                        cpu=cpu,
                        freq=freq,
                        res_dir=item_res_dir,
                        # We already did that once and for all, so that we
                        # don't spend too much time endlessly switching back
                        # and forth between governors
                        switch_governor=False,
                    )
                    sanity_items.append(item)

        dmesg_coll.get_data(dmesg_path)
        return cls(res_dir, plat_info, sanity_items)
Example #8
    def _build_invariance_items(cls, target, res_dir, ftrace_coll):
        """
        Yield a :class:`InvarianceItem` for a subset of the target's
        frequencies, for one CPU of each capacity class.

        This is a generator function.

        :rtype: Iterator[:class:`InvarianceItem`]
        """
        plat_info = target.plat_info

        def pick_cpu(filtered_class, cpu_class):
            try:
                return filtered_class[0]
            except IndexError:
                raise RuntimeError(
                    'All CPUs of one capacity class have been blacklisted: {}'.
                    format(cpu_class))

        # pick one CPU per class of capacity
        cpus = [
            pick_cpu(filtered_class, cpu_class)
            for cpu_class, filtered_class in zip(
                plat_info['capacity-classes'],
                cls.filter_capacity_classes(plat_info))
        ]

        logger = cls.get_logger()
        logger.info('Selected one CPU of each capacity class: {}'.format(cpus))
        for cpu in cpus:
            all_freqs = target.cpufreq.list_frequencies(cpu)
            # If we have loads of frequencies, just test a cross-section so it
            # doesn't take all day
            freq_list = all_freqs[::len(all_freqs) // 8 +
                                  (1 if len(all_freqs) % 2 else 0)]

            # Make sure we have increasing frequency order, to make the logs easier
            # to navigate
            freq_list.sort()

            for freq in freq_list:
                item_dir = ArtifactPath.join(
                    res_dir, "{prefix}_{cpu}@{freq}".format(
                        prefix=InvarianceItem.task_prefix,
                        cpu=cpu,
                        freq=freq,
                    ))
                os.makedirs(item_dir)

                logger.info('Running experiment for CPU {}@{}'.format(
                    cpu, freq))
                yield InvarianceItem.from_target(
                    target,
                    cpu,
                    freq,
                    all_freqs,
                    res_dir=item_dir,
                    ftrace_coll=ftrace_coll,
                )
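The cross-section slice is easier to see on a worked example (hypothetical frequency count): with 19 available frequencies the stride is 19 // 8 + 1 = 3, so roughly a third of the frequencies are kept.

    all_freqs = list(range(19))      # 19 hypothetical frequencies
    stride = len(all_freqs) // 8 + (1 if len(all_freqs) % 2 else 0)  # 2 + 1 = 3
    freq_list = all_freqs[::stride]  # 7 frequencies: 0, 3, 6, ..., 18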
Example #9
    def run(self, cpus=None, cgroup=None, background=False, as_root=False, timeout=None):
        """
        Execute the workload on the configured target.

        :param cpus: CPUs on which to restrict the workload execution (taskset)
        :type cpus: list(int)

        :param cgroup: cgroup in which to run the workload
        :type cgroup: str

        :param background: Whether to run the workload in the background or not
        :type background: bool

        :param as_root: Whether to run the workload as root or not
        :type as_root: bool

        :param timeout: Timeout in seconds for the workload execution.
        :type timeout: int

        :raise devlib.exception.TimeoutError: When the specified ``timeout`` is hit.

        The standard output will be saved into a file in ``self.res_dir``.
        """
        logger = self.get_logger()
        if not self.command:
            raise RuntimeError("Workload does not specify any command to execute")

        _command = self.command
        target = self.target

        if cpus:
            taskset_bin = target.which('taskset')
            if not taskset_bin:
                raise RuntimeError("Could not find 'taskset' executable on the target")

            cpumask = list_to_mask(cpus)
            taskset_cmd = '{} {}'.format(quote(taskset_bin), quote('0x{:x}'.format(cpumask)))
            _command = '{} {}'.format(taskset_cmd, _command)

        if cgroup:
            _command = target.cgroups.run_into_cmd(cgroup, _command)

        _command = 'cd {} && {}'.format(quote(self.run_dir), _command)

        logger.info("Execution start: {}".format(_command))

        if background:
            target.background(_command, as_root=as_root)
        else:
            self.output = target.execute(_command, as_root=as_root, timeout=timeout)
            logger.info("Execution complete")

            logfile = ArtifactPath.join(self.res_dir, 'output.log')
            logger.debug('Saving stdout to {}...'.format(logfile))

            with open(logfile, 'w') as ofile:
                ofile.write(self.output)
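The taskset prefix relies on list_to_mask packing the CPU list into a bitmask that is then rendered in hex. A worked example (hypothetical CPU list):

    cpumask = list_to_mask([0, 3])    # CPUs 0 and 3 -> 0b1001 == 9
    taskset_cmd = 'taskset {}'.format(quote('0x{:x}'.format(cpumask)))  # "taskset 0x9"
    # final command: cd <run_dir> && taskset 0x9 <workload command>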
Example #10
    def _plot_pelt(self, task, signal_name, simulated, test_name):
        trace = self.trace

        axis = trace.analysis.load_tracking.plot_task_signals(task, signals=[signal_name])
        simulated.plot(ax=axis, drawstyle='steps-post', label=f'simulated {signal_name}')

        activation_axis = axis.twinx()
        trace.analysis.tasks.plot_task_activation(task, alpha=0.2, axis=activation_axis, duration=True)

        axis.legend()

        path = ArtifactPath.join(self.res_dir, f'{test_name}_{signal_name}.png')
        trace.analysis.load_tracking.save_plot(axis.get_figure(), filepath=path)
Example #11
    def _plot_pelt(self, task, signal_name, simulated, test_name):
        trace = self.trace

        kwargs = dict(always_save=False, interactive=False)

        axis = trace.analysis.load_tracking.plot_task_signals(task, signals=[signal_name], **kwargs)
        simulated.plot(ax=axis, drawstyle='steps-post', label='simulated {}'.format(signal_name))
        trace.analysis.tasks.plot_task_activation(task, alpha=0.2, axis=axis, **kwargs)

        axis.legend()

        path = ArtifactPath.join(self.res_dir, '{}_{}.png'.format(test_name, signal_name))
        trace.analysis.load_tracking.save_plot(axis.get_figure(), filepath=path)
Example #12
    def _plot_expected_util(self, util_df, nrg_model):
        """
        Create a plot of the expected per-CPU utilization for the experiment
        The plot is then output to the test results directory.

        :param experiment: The :class:Experiment to examine
        :param util_df: A Pandas Dataframe with a column per CPU giving their
                        (expected) utilization at each timestamp.

        :param nrg_model: EnergyModel used to get the CPU from
        :type nrg_model: EnergyModel
        """
        analysis = self.trace.analysis.tasks
        fig, ax = analysis.setup_plot(
            nrows=len(nrg_model.cpus),
            ncols=1,
            height=1.8,
        )

        fig.suptitle('Per-CPU expected utilization')

        for cpu in nrg_model.cpus:
            tdf = util_df[cpu]

            ax[cpu].set_ylim((0, 1024))
            tdf.plot(ax=ax[cpu],
                     drawstyle='steps-post',
                     title=f"CPU{cpu}",
                     color='red')
            ax[cpu].set_ylabel('Utilization')

            # Grey-out areas where utilization == 0
            ffill = False
            prev = 0.0
            for time, util in tdf.items():
                if ffill:
                    ax[cpu].axvspan(prev,
                                    time,
                                    facecolor='gray',
                                    alpha=0.1,
                                    linewidth=0.0)
                    ffill = False
                if util == 0.0:
                    ffill = True

                prev = time

        filepath = ArtifactPath.join(self.res_dir, 'expected_placement.png')
        analysis.save_plot(fig, filepath=filepath)
Example #13
    def _plot_expected_util(self, util_df, nrg_model):
        """
        Create a plot of the expected per-CPU utilization for the experiment
        The plot is then output to the test results directory.

        :param experiment: The :class:Experiment to examine
        :param util_df: A Pandas Dataframe with a column per CPU giving their
                        (expected) utilization at each timestamp.

        :param nrg_model: EnergyModel used to get the CPU from
        :type nrg_model: EnergyModel
        """

        fig, ax = plt.subplots(len(nrg_model.cpus),
                               1,
                               figsize=(16, 1.8 * len(nrg_model.cpus)))
        fig.suptitle('Per-CPU expected utilization')

        for cpu in nrg_model.cpus:
            tdf = util_df[cpu]

            ax[cpu].set_ylim((0, 1024))
            tdf.plot(ax=ax[cpu],
                     drawstyle='steps-post',
                     title="CPU{}".format(cpu),
                     color='red')
            ax[cpu].set_ylabel('Utilization')

            # Grey-out areas where utilization == 0
            ffill = False
            prev = 0.0
            for time, util in tdf.items():
                if ffill:
                    ax[cpu].axvspan(prev,
                                    time,
                                    facecolor='gray',
                                    alpha=0.1,
                                    linewidth=0.0)
                    ffill = False
                if util == 0.0:
                    ffill = True

                prev = time

        figname = ArtifactPath.join(self.res_dir, 'expected_placement.png')
        plt.savefig(figname, bbox_inches='tight')
        plt.close()
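A caveat with the bare plt.subplots() call: when nrg_model.cpus contains a single CPU it returns a lone Axes rather than an array, so ax[cpu] would fail. Passing squeeze=False keeps the return value a 2D array regardless (a defensive variant, not what the snippet above does):

    fig, ax = plt.subplots(len(nrg_model.cpus), 1, squeeze=False,
                           figsize=(16, 1.8 * len(nrg_model.cpus)))
    axis = ax[cpu, 0]   # always indexable, even with a single row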
Example #14
    def _early_init(self, target, name, res_dir, json_file):
        """
        Initialize everything that is not related to the contents of the JSON file.
        """
        super().__init__(target, name, res_dir)

        if not json_file:
            json_file = '{}.json'.format(self.name)

        self.local_json = ArtifactPath.join(self.res_dir, json_file)
        self.remote_json = self.target.path.join(self.run_dir, json_file)

        rta_cmd = self.target.which('rt-app')
        if not rta_cmd:
            raise RuntimeError("No rt-app executable found on the target")

        self.command = '{0:s} {1:s} 2>&1'.format(rta_cmd, self.remote_json)
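Unlike Example #5 above, this older variant does not shell-quote the command components; with shlex.quote (as Example #5 does) the command stays valid for paths containing spaces:

    self.command = '{} {} 2>&1'.format(quote(rta_cmd), quote(self.remote_json))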
Example #15
    def _create_test_bundle_item(cls, target, res_dir, ftrace_coll, item_cls,
                                 boost, prefer_idle):
        """
        Create and return a TestBundle for a given item class and a given
        schedtune configuration.
        """
        item_dir = ArtifactPath.join(res_dir, f'boost_{boost}_prefer_idle_{int(prefer_idle)}')
        os.makedirs(item_dir)

        logger = cls.get_logger()
        logger.info(f'Running {item_cls.__name__} with boost={boost}, prefer_idle={prefer_idle}')
        return item_cls.from_target(
            target,
            boost=boost,
            prefer_idle=prefer_idle,
            res_dir=item_dir,
            ftrace_coll=ftrace_coll,
        )
Example #16
    def _init_plat_info(self, plat_info=None, name=None, **kwargs):

        if plat_info is None:
            plat_info = PlatformInfo()
        else:
            # Make a copy of the PlatformInfo so we don't modify the original
            # one we were passed when adding the target source to it
            plat_info = copy.copy(plat_info)
            self.logger.info(
                f'User-defined platform information:\n{plat_info}')

        # Take the board name from the target configuration so it becomes
        # available for later inspection. That board name is mostly free form
        # and no specific value should be expected for a given kind of board
        # (i.e. a Juno board might be named "foo-bar-juno-on-my-desk")
        if name:
            plat_info.add_src('target-conf', dict(name=name))

        rta_calib_res_dir = ArtifactPath.join(self._res_dir, 'rta_calib')
        os.makedirs(rta_calib_res_dir, exist_ok=True)
        plat_info.add_target_src(self, rta_calib_res_dir, **kwargs)
        self.plat_info = plat_info
Example #17
File: load_tracking.py Project: credp/lisa
    def _build_invariance_items(cls, target, res_dir, **kwargs):
        """
        Yield a :class:`InvarianceItemBase` for a subset of the target's
        frequencies, for one CPU of each capacity class.

        This is a generator function.

        :Variable keyword arguments: Forwarded to :meth:`InvarianceItemBase.from_target`

        :rtype: Iterator[:class:`InvarianceItemBase`]
        """
        plat_info = target.plat_info

        def pick_cpu(filtered_class, cpu_class):
            try:
                return filtered_class[0]
            except IndexError:
                raise RuntimeError(
                    f'All CPUs of one capacity class have been ignored: {cpu_class}'
                )

        # pick one CPU per class of capacity
        cpus = [
            pick_cpu(filtered_class, cpu_class)
            for cpu_class, filtered_class in zip(
                plat_info['capacity-classes'],
                cls.filter_capacity_classes(plat_info))
        ]

        def select_freqs(cpu):
            all_freqs = plat_info['freqs'][cpu]

            def interpolate(start, stop, nr):
                step = (stop - start) / (nr - 1)
                return [start + i * step for i in range(nr)]

            # Select the higher freq no matter what
            selected_freqs = {max(all_freqs)}

            available_freqs = set(all_freqs) - selected_freqs
            nr_freqs = cls.NR_FREQUENCIES - len(selected_freqs)
            for ideal_freq in interpolate(min(all_freqs), max(all_freqs),
                                          nr_freqs):

                if not available_freqs:
                    break

                # Select the freq closest to ideal
                selected_freq = min(available_freqs,
                                    key=lambda freq: abs(freq - ideal_freq))
                available_freqs.discard(selected_freq)
                selected_freqs.add(selected_freq)

            return all_freqs, sorted(selected_freqs)

        cpu_freqs = {cpu: select_freqs(cpu) for cpu in cpus}

        logger = cls.get_logger()
        logger.info('Will run on: {}'.format(', '.join(
            f'CPU{cpu}@{freq}'
            for cpu, (all_freqs, freq_list) in sorted(cpu_freqs.items())
            for freq in freq_list)))

        for cpu, (all_freqs, freq_list) in sorted(cpu_freqs.items()):
            for freq in freq_list:
                item_dir = ArtifactPath.join(
                    res_dir, f"{InvarianceItemBase.task_prefix}_{cpu}@{freq}")
                os.makedirs(item_dir)

                logger.info(f'Running experiment for CPU {cpu}@{freq}')
                yield cls.ITEM_CLS.from_target(
                    target,
                    cpu=cpu,
                    freq=freq,
                    freq_list=all_freqs,
                    res_dir=item_dir,
                    **kwargs,
                )
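select_freqs() is easiest to follow on a small worked example (hypothetical frequencies, assuming NR_FREQUENCIES == 4): the maximum frequency is always kept, then each interpolated target grabs the closest still-available frequency.

    all_freqs = [500, 800, 1000, 1400, 2000]
    # selected_freqs starts as {2000}
    # interpolate(500, 2000, 3) -> [500.0, 1250.0, 2000.0]
    # ideal 500.0  -> picks 500
    # ideal 1250.0 -> picks 1400 (closest of {800, 1000, 1400})
    # ideal 2000.0 -> picks 1000 (closest of {800, 1000})
    # select_freqs() returns (all_freqs, [500, 1000, 1400, 2000])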
Example #18
    def _from_target(cls,
                     target: Target,
                     *,
                     res_dir: ArtifactPath = None,
                     seed=None,
                     nr_operations=100,
                     sleep_min_ms=10,
                     sleep_max_ms=100,
                     max_cpus_off=sys.maxsize) -> 'HotplugBase':
        """
        :param seed: Seed of the RNG used to create the hotplug sequences
        :type seed: int

        :param nr_operations: Number of operations in the sequence
        :type nr_operations: int

        :param sleep_min_ms: Minimum sleep duration between hotplug operations
        :type sleep_min_ms: int

        :param sleep_max_ms: Maximum sleep duration between hotplug operations
          (0 would lead to no sleep)
        :type sleep_max_ms: int

        :param max_cpus_off: Maximum number of CPUs hotplugged out at any given
          moment
        :type max_cpus_off: int
        """

        # Instantiate a generator so we can change the seed without any global
        # effect
        random_gen = random.Random()
        random_gen.seed(seed)

        target.hotplug.online_all()
        hotpluggable_cpus = target.hotplug.list_hotpluggable_cpus()

        sequence = list(
            cls.cpuhp_seq(nr_operations, hotpluggable_cpus, max_cpus_off,
                          random_gen))

        cls._check_cpuhp_seq_consistency(nr_operations, hotpluggable_cpus,
                                         max_cpus_off, sequence)

        do_hotplug = cls._cpuhp_func(target, res_dir, sequence, sleep_min_ms,
                                     sleep_max_ms, random_gen)

        # We don't want a timeout but we do want to detect if/when the target
        # stops responding. So handle the hotplug remote func in a separate
        # thread and keep polling the target
        thread = Thread(target=do_hotplug, daemon=True)

        dmesg_path = ArtifactPath.join(res_dir, cls.DMESG_PATH)
        dmesg_coll = DmesgCollector(target)
        with dmesg_coll:
            try:
                thread.start()
                while thread.is_alive():
                    # We might have a thread hanging off in that case, but there is
                    # not much we can do since the remote func cannot really be
                    # canceled. Since it was spawned with a timeout, it will
                    # eventually die.
                    if not target.check_responsive():
                        break
                    sleep(0.1)
            finally:
                target_alive = bool(target.check_responsive())
                target.hotplug.online_all()

        dmesg_coll.get_data(dmesg_path)

        live_cpus = target.list_online_cpus() if target_alive else []
        return cls(res_dir, target.plat_info, target_alive, hotpluggable_cpus,
                   live_cpus)
Example #19
    def dmesg_path(self):
        """
        Path to the dmesg output log file.
        """
        return ArtifactPath.join(self.res_dir, self.DMESG_PATH)
Example #20
    def trace_path(self):
        """
        Path to the ``trace.dat`` trace file collected with ``trace-cmd``.
        """
        return ArtifactPath.join(self.res_dir, self.TRACE_PATH)
Example #21
    def _filepath(cls, res_dir):
        return ArtifactPath.join(res_dir, "{}.yaml".format(cls.__qualname__))
Example #22
    def __exit__(self, *exc_info):
        inner_exit = self._bg_cmd.__exit__
        wload = self.wload
        logger = self.get_logger()

        try:
            suppress = inner_exit(*exc_info)
        except BaseException as e:
            exc_info = (type(e), e, e.__traceback__)
        else:
            if suppress:
                exc_info = (None, None, None)

        type_, value, traceback = exc_info

        returncode = self._bg_cmd.poll()

        if exc_info[0] is not None:
            try:
                self.gen.throw(*exc_info)
            except StopIteration as e:
                if e is value:
                    self._output = _NotSet(e)
                    return False
                else:
                    self._output = e.value
                    return True
            except BaseException as e:
                # Place a "bomb" value: if the user tries to access
                # "self.output", the exception will be raised again
                self._output = _NotSet(e)
                # __exit__ is not expected to re-raise the exception it was
                # given; instead it returns a falsy value to indicate that the
                # exception should not be swallowed
                if e is value:
                    return False
                else:
                    raise
            # This cannot happen: throw() has to raise the exception or swallow
            # it and then later raise a StopIteration because it is finished
            else:
                assert False
        else:
            try:
                futures = self._futures
            except ValueError:
                results = dict(stdout=None, stderr=None)
            else:
                results = {
                    name: future.result()
                    for name, future in futures.items()
                }
                if wload._settings['log_std_streams']:
                    # Dump the stdout/stderr content to log files for easier
                    # debugging
                    for name, content in results.items():
                        path = ArtifactPath.join(wload.res_dir, f'{name}.log')
                        logger.debug(f'Saving {name} to {path}...')

                        with open(path, 'wb') as f:
                            f.write(content)

            # For convenience and to avoid depending too much on devlib's
            # BackgroundCommand in simple cases
            results['returncode'] = returncode

            if returncode:
                action = lambda: self.gen.throw(
                    CalledProcessError(
                        returncode=returncode,
                        cmd=f'<Workload {self.name}>',
                        output=results['stdout'],
                        stderr=results['stderr'],
                    )
                )
            else:
                action = lambda: self.gen.send(results)

            try:
                action()
            except StopIteration as e:
                output = e.value
            except Exception as e:
                output = _NotSet(e)

            self._output = output
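The exception plumbing above follows the usual generator-based context manager protocol (the same one contextlib relies on): the exception is thrown into the wrapped generator, and a StopIteration means the generator ran to completion and decided the exception's fate. In outline:

    try:
        gen.throw(*exc_info)    # forward the exception into the generator
    except StopIteration:
        pass                    # generator finished; exception was handled
    # anything else raised by gen.throw() propagates to the caller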
Example #23
File: load_tracking.py Project: wisen/lisa
    def _test_behaviour(self, signal_name, error_margin_pct,
                        allowed_error_pct):
        res = ResultBundle.from_bool(True)
        task = self.rtapp_profile[self.task_name]
        phase = task.phases[0]
        cpu = phase.cpus[0]

        # Note: This test-case is only valid if executed at capacity == 1024.
        # The assertion below is insufficient, as it only checks that the CPU
        # can potentially reach a capacity of 1024.
        assert self.plat_info["cpu-capacities"][cpu] == UTIL_SCALE

        # Due to simplifications in the PELT simulator :class:`bart.pelt.Simulator`
        # and general misalignment between the simulated PELT signal and the
        # traced one, the error margin has to be fairly generous. The error
        # sources include:
        #
        # 1. Misaligned PELT period boundaries (error <= 22)
        # 2. Inaccurate duty cycle of the rt-app task, which can't be compensated
        #    for in the PELT simulator as it only speaks integer PELT periods.
        # 3. Nearest sample can be up to 512us off (error <= 11).
        #
        # The combined error depends on the properties of the task being
        # traced/modelled. For a 50% 16ms period task that amounts to 5-7%
        # error on TC2.

        peltsim, _, sim_df = self.get_simulated_pelt(cpu, signal_name)
        signal_df = self.get_task_sched_signal(cpu, signal_name)

        trace_duty_cycle = self.get_task_duty_cycle_pct(
            self.trace, self.task_name, cpu)
        requested_duty_cycle = phase.duty_cycle_pct

        # Do a bit of plotting
        fig, axes = plt.subplots(2, 1, figsize=(32, 10), sharex=True)
        self._plot_behaviour(axes[0], signal_df[signal_name], "Trace signal",
                             trace_duty_cycle)
        self._plot_behaviour(axes[1], sim_df.pelt_value, "Expected signal",
                             requested_duty_cycle)

        figname = ArtifactPath.join(self.res_dir,
                                    '{}_behaviour.png'.format(signal_name))
        plt.savefig(figname, bbox_inches='tight')
        plt.close()

        # Compare actual PELT signal with the simulated one
        errors = 0

        for entry in signal_df.iterrows():
            trace_val = entry[1][signal_name]
            timestamp = entry[0]

            sim_val_loc = sim_df.index.get_loc(timestamp, method='nearest')
            sim_val = sim_df.pelt_value.iloc[sim_val_loc]

            if not self.is_almost_equal(trace_val, sim_val, error_margin_pct):
                errors += 1

        error_pct = (errors / len(signal_df)) * 100
        res.add_metric("Error stats", {
            "total": TestMetric(errors),
            "pct": TestMetric(error_pct)
        })
        # Exclude possible outliers (these may be due to a kernel thread that
        # for some reason gets coscheduled with our workload).
        if error_pct > allowed_error_pct:
            res.result = Result.FAILED

        return res
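A portability note on the nearest-sample lookup above: Index.get_loc() lost its method= parameter in pandas 2.0, so on recent pandas the equivalent lookup is (a sketch with the same semantics):

    sim_val_loc = sim_df.index.get_indexer([timestamp], method='nearest')[0]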
Example #24
    def __init__(
        self,
        kind,
        name='<noname>',
        tools=[],
        res_dir=None,
        plat_info=None,
        lazy_platinfo=False,
        workdir=None,
        device=None,
        host=None,
        port=None,
        username=None,
        password=None,
        keyfile=None,
        strict_host_check=None,
        devlib_platform=None,
        devlib_excluded_modules=[],
        wait_boot=True,
        wait_boot_timeout=10,
    ):
        # pylint: disable=dangerous-default-value
        super().__init__()
        logger = self.get_logger()

        self.name = name

        res_dir = res_dir if res_dir else self._get_res_dir(
            root=os.path.join(LISA_HOME, RESULT_DIR),
            relative='',
            name=f'{self.__class__.__qualname__}-{self.name}',
            append_time=True,
            symlink=True)

        self._res_dir = res_dir
        os.makedirs(self._res_dir, exist_ok=True)
        if os.listdir(self._res_dir):
            raise ValueError(f'res_dir must be empty: {self._res_dir}')

        if plat_info is None:
            plat_info = PlatformInfo()
        else:
            # Make a copy of the PlatformInfo so we don't modify the original
            # one we were passed when adding the target source to it
            plat_info = copy.copy(plat_info)
            logger.info(f'User-defined platform information:\n{plat_info}')

        self.plat_info = plat_info

        # Take the board name from the target configuration so it becomes
        # available for later inspection. That board name is mostly free form
        # and no specific value should be expected for a given kind of board
        # (i.e. a Juno board might be named "foo-bar-juno-on-my-desk")
        if name:
            self.plat_info.add_src('target-conf', dict(name=name))

        self._installed_tools = set()
        self.target = self._init_target(
            kind=kind,
            name=name,
            workdir=workdir,
            device=device,
            host=host,
            port=port,
            username=username,
            password=password,
            keyfile=keyfile,
            strict_host_check=strict_host_check,
            devlib_platform=devlib_platform,
            wait_boot=wait_boot,
            wait_boot_timeout=wait_boot_timeout,
        )

        devlib_excluded_modules = set(devlib_excluded_modules)
        # Sorry, can't let you do that. Messing with cgroups in a systemd
        # system is a pretty bad idea.
        if self._uses_systemd:
            logger.warning(
                'Will not load cgroups devlib module: target is using systemd, which already uses cgroups'
            )
            devlib_excluded_modules.add('cgroups')

        self._devlib_loadable_modules = _DEVLIB_AVAILABLE_MODULES - devlib_excluded_modules

        # Initialize binary tools to deploy
        if tools:
            logger.info(f'Tools to install: {tools}')
            self.install_tools(tools)

        # Autodetect information from the target, after the Target is
        # initialized. Expensive computations are deferred so they will only be
        # computed when actually needed.

        rta_calib_res_dir = ArtifactPath.join(self._res_dir, 'rta_calib')
        os.makedirs(rta_calib_res_dir)
        self.plat_info.add_target_src(self,
                                      rta_calib_res_dir,
                                      deferred=lazy_platinfo,
                                      fallback=True)

        logger.info(f'Effective platform information:\n{self.plat_info}')
Example #25
File: target.py Project: ambroise-arm/lisa
    def __init__(
        self,
        kind,
        name='<noname>',
        tools=[],
        res_dir=None,
        plat_info=None,
        workdir=None,
        device=None,
        host=None,
        port=None,
        username=None,
        password=None,
        keyfile=None,
        devlib_platform=None,
        devlib_excluded_modules=[],
        wait_boot=True,
        wait_boot_timeout=10,
    ):

        super().__init__()
        logger = self.get_logger()

        self.name = name

        res_dir = res_dir if res_dir else self._get_res_dir(
            root=os.path.join(LISA_HOME, RESULT_DIR),
            relative='',
            name='{}-{}'.format(self.__class__.__qualname__, self.name),
            append_time=True,
            symlink=True)

        self._res_dir = res_dir
        os.makedirs(self._res_dir, exist_ok=True)
        if os.listdir(self._res_dir):
            raise ValueError('res_dir must be empty: {}'.format(self._res_dir))

        if plat_info is None:
            plat_info = PlatformInfo()
        else:
            # Make a copy of the PlatformInfo so we don't modify the original
            # one we were passed when adding the target source to it
            plat_info = copy.copy(plat_info)
            logger.info(
                'User-defined platform information:\n{}'.format(plat_info))

        self.plat_info = plat_info

        # Take the board name from the target configuration so it becomes
        # available for later inspection. That board name is mostly free form
        # and no specific value should be expected for a given kind of board
        # (i.e. a Juno board might be named "foo-bar-juno-on-my-desk")
        if name:
            self.plat_info.add_src('target-conf', dict(name=name))

        self._installed_tools = set()
        self.target = self._init_target(
            kind=kind,
            name=name,
            workdir=workdir,
            device=device,
            host=host,
            port=port,
            username=username,
            password=password,
            keyfile=keyfile,
            devlib_platform=devlib_platform,
            devlib_excluded_modules=devlib_excluded_modules,
            wait_boot=wait_boot,
            wait_boot_timeout=wait_boot_timeout,
        )

        # Initialize binary tools to deploy
        if tools:
            logger.info('Tools to install: {}'.format(tools))
            self.install_tools(tools)

        # Autodetect information from the target, after the Target is
        # initialized. Expensive computations are deferred so they will only be
        # computed when actually needed.

        rta_calib_res_dir = ArtifactPath.join(self._res_dir, 'rta_calib')
        os.makedirs(rta_calib_res_dir)
        self.plat_info.add_target_src(self, rta_calib_res_dir, fallback=True)

        logger.info('Effective platform information:\n{}'.format(
            self.plat_info))