Example #1
def compute_means(row):
    start = row.name
    end = start + row['duration']
    phase_activations = df_window(df_activations, (start, end))
    phase_util = df_window(df_util, (start, end))
    series = pd.Series({
        'Phase duty cycle average': series_mean(phase_activations['duty_cycle']),
        'Phase util tunnel average': kernel_util_mean(
            phase_util['util'],
            plat_info=self.plat_info,
        ),
    })
    return series
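The helper above is written to be applied row by row over a phases dataframe. A minimal usage sketch, assuming `df_phases` (hypothetical name) is indexed by phase start time and carries a 'duration' column:

    # One row of per-phase averages per phase
    means_df = df_phases.apply(compute_means, axis=1)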
Example #2
    def get_trace_cpu_util(self):
        """
        Get the per-phase average CPU utilization read from the trace.

        :returns: A dict of the shape {cpu : {phase_id : trace_util}}
        """
        df = self.trace.analysis.load_tracking.df_cpus_signal('util')
        tasks = self.rtapp_task_ids_map.keys()
        task = sorted(task for task in tasks if task.startswith('migr'))[0]
        task = self.rtapp_task_ids_map[task][0]

        cpu_util = {}
        for row in self.trace.analysis.rta.df_phases(task).itertuples():
            phase = row.phase
            duration = row.duration
            start = row.Index
            end = start + duration
            # Ignore the first quarter of the util signal of each phase, since
            # it's impacted by the phase change, and util can be affected
            # (rtapp does some bookkeeping at the beginning of phases)
            start += duration / 4
            phase_df = df_window(df, (start, end),
                                 method='pre',
                                 clip_window=True)

            for cpu in self.cpus:
                util = phase_df[phase_df['cpu'] == cpu]['util']
                cpu_util.setdefault(cpu, {})[phase] = kernel_util_mean(
                    util, plat_info=self.plat_info)

        return cpu_util
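A usage sketch of the returned structure; `test_bundle` below is a stand-in for an instance of the class defining this method:

    # Hypothetical instance exposing get_trace_cpu_util()
    cpu_util = test_bundle.get_trace_cpu_util()
    for cpu, phases in sorted(cpu_util.items()):
        for phase, util in sorted(phases.items()):
            print(f'CPU{cpu}, phase {phase}: util={util:.1f}')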
Example #3
    def df_context_switches(self):
        """
        Compute number of context switches on each CPU.

        :returns: A :class:`pandas.DataFrame` with:

          * A ``context_switch_cnt`` column (the number of context switches per CPU)
        """
        # Since we want to count the number of context switches, we don't want
        # the initial state of every task to show up as an extra event
        # (hence signals_init=False)
        sched_df = self.trace.df_event('sched_switch', signals_init=False)
        # Make sure to only get the switches inside the window
        sched_df = df_window(
            sched_df,
            method='exclusive',
            window=self.trace.window,
            clip_window=False,
        )
        cpus = list(range(self.trace.cpus_count))
        ctx_sw_df = pd.DataFrame(
            [len(sched_df[sched_df['__cpu'] == cpu]) for cpu in cpus],
            index=cpus,
            columns=['context_switch_cnt'])
        ctx_sw_df.index.name = 'cpu'

        return ctx_sw_df
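The counting idiom can be checked standalone with plain pandas, leaving out the lisa-specific windowing:

    import pandas as pd

    # Toy stand-in for the windowed sched_switch dataframe
    sched_df = pd.DataFrame({'__cpu': [0, 0, 1, 0, 1]},
                            index=[0.1, 0.2, 0.3, 0.4, 0.5])
    cpus = [0, 1]
    ctx_sw_df = pd.DataFrame(
        [len(sched_df[sched_df['__cpu'] == cpu]) for cpu in cpus],
        index=cpus,
        columns=['context_switch_cnt'])
    ctx_sw_df.index.name = 'cpu'
    print(ctx_sw_df)  # cpu 0 -> 3 switches, cpu 1 -> 2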
Example #4
File: tasks.py Project: Smilence902/lisa
    def plot_tasks_forks_heatmap(self,
                                 xbins: int = 100,
                                 colormap=None,
                                 axis=None,
                                 **kwargs):
        """
        :param xbins: Number of x-axis bins, i.e. in how many slices should
          time be arranged
        :type xbins: int

        :param colormap: The name of a colormap (see
          https://matplotlib.org/users/colormaps.html), or a Colormap object
        :type colormap: str or matplotlib.colors.Colormap
        """

        df = self.trace.df_event("sched_wakeup_new")
        df = df_window(df,
                       window=self.trace.window,
                       method='exclusive',
                       clip_window=False)

        fig, axis = self._plot_cpu_heatmap(df.index,
                                           df.target_cpu,
                                           xbins,
                                           "Number of forks",
                                           cmap=colormap,
                                           **kwargs)

        axis.set_title("Tasks forks over time")
        return axis
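A hypothetical invocation, assuming the method is reached through the trace's tasks analysis proxy as in the other examples:

    # `trace` is assumed to be a lisa Trace object (hypothetical usage)
    axis = trace.analysis.tasks.plot_tasks_forks_heatmap(xbins=50,
                                                         colormap='viridis')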
Example #5
    def df_cpu_frequency_transitions(self, cpu):
        """
        Compute number of frequency transitions of a given CPU.

        :param cpu: a CPU ID
        :type cpu: int

        :returns: A :class:`pandas.DataFrame` with:

          * A ``transitions`` column (the number of frequency transitions)
        """

        freq_df = self.df_cpu_frequency(cpu, signals_init=False)
        # Since we want to count the number of events appearing inside the
        # window, make sure we don't get anything outside it
        freq_df = df_window(
            freq_df,
            window=self.trace.window,
            method='exclusive',
            clip_window=False,
        )
        cpu_freqs = freq_df['frequency']

        # Remove possible duplicates (example: when devlib sets trace markers
        # a cpu_frequency event is triggered that can generate a duplicate)
        cpu_freqs = series_deduplicate(cpu_freqs,
                                       keep='first',
                                       consecutives=True)
        transitions = cpu_freqs.value_counts()

        transitions.name = "transitions"
        transitions.sort_index(inplace=True)

        return pd.DataFrame(transitions)
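The deduplicate-then-count step can be sketched in plain pandas; the shift-based filter below is a stand-in for what `series_deduplicate(..., keep='first', consecutives=True)` is assumed to do:

    import pandas as pd

    freqs = pd.Series([500, 500, 1000, 1500, 1500, 500])
    deduped = freqs[freqs.shift() != freqs]  # drop consecutive repeats
    transitions = deduped.value_counts().sort_index()
    print(transitions)  # 500 -> 2, 1000 -> 1, 1500 -> 1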
Example #6
File: rta.py Project: ambroise-arm/lisa
def cpus_of_phase_at(t):
    window = (t, end_of_phase_at(t))
    df = df_window(states_df,
                   window,
                   method='pre',
                   clip_window=True)
    return sorted(int(x) for x in df['cpu'].unique())
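A plain-pandas sketch of the assumed `method='pre'` semantics: keep the last sample at or before the window start, so the signal value is known over the whole window (toy data):

    import pandas as pd

    states_df = pd.DataFrame({'cpu': [0, 1, 2, 3]},
                             index=[0.0, 1.0, 2.0, 3.0])
    start, end = 1.5, 3.0
    pre_start = states_df.index[states_df.index <= start].max()
    windowed = states_df.loc[pre_start:end]
    print(windowed)  # rows at t=1.0, 2.0 and 3.0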
Example #7
    def get_task_sched_signal(self, trace, cpu, task_name, signal):
        """
        Get a :class:`pandas.DataFrame` with the sched signals for the workload task

        This examines scheduler load tracking trace events. You will need a
        target kernel that includes the required events.

        :returns: :class:`pandas.DataFrame` with a column for each signal for
          the workload task
        """
        df = trace.analysis.load_tracking.df_tasks_signal(signal)
        df = df[df['comm'] == task_name]
        window = self.get_task_window(trace, task_name, cpu)
        df = df_window(df, window, method='exclusive')

        # Normalize the signal with the detected task execution start
        df.index -= window[0]

        return df
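Hypothetical usage from within a test class; the task name and CPU are made up, and `series_mean` is assumed to be the lisa.datautils helper seen in the other examples:

    df = self.get_task_sched_signal(trace, cpu=0, task_name='task_migr0',
                                    signal='util')
    mean_util = series_mean(df['util'])  # index already starts at 0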
Example #8
    def _plot_cpu_heatmap(self, event, bins, xbins, cmap):
        """
        Plot some data in a heatmap-style 2d histogram
        """
        df = self.trace.df_event(event)
        df = df_window(df, window=self.trace.window, method='exclusive', clip_window=False)
        x = df.index
        y = df['target_cpu']

        if xbins:
            warnings.warn('"xbins" parameter is deprecated and will be removed, use "bins" instead', DeprecationWarning)
            bins = xbins

        nr_cpus = self.trace.cpus_count
        hist = np.histogram2d(y, x, bins=[nr_cpus, bins])
        z, _, x = hist
        y = list(range(nr_cpus))
        return hv.HeatMap(
            (x, y, z),
            kdims=[
                # Manually set dimension name/label so that shared_axes works
                # properly.
                # Also makes hover tooltip better.
                hv.Dimension('Time'),
                hv.Dimension('CPU'),
            ],
            vdims=[
                hv.Dimension(event),
            ]
        ).options(
            colorbar=True,
            xlabel='Time (s)',
            ylabel='CPU',
            # Viridis works both on bokeh and matplotlib
            cmap=cmap or 'Viridis',
            yticks=[
                (cpu, f'CPU{cpu}')
                for cpu in y
            ]
        )
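The histogram2d unpacking above can be verified standalone: `z` has one row per CPU and one column per time bin, and the rebound `x` holds the time bin edges:

    import numpy as np

    y = np.array([0, 1, 1, 0])           # CPU of each event
    x = np.array([0.1, 0.2, 0.6, 0.9])   # event timestamps
    z, cpu_edges, time_edges = np.histogram2d(y, x, bins=[2, 4])
    assert z.shape == (2, 4)             # (nr_cpus, bins)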
Example #9
    def test_activations(self) -> ResultBundle:
        """
        Test signals are properly "aggregated" at enqueue/dequeue time.

        On fast-ramp systems, `enqueued` is expected to be always
        smaller than `ewma`.

        On non fast-ramp systems, `enqueued` is expected to be
        smaller than `ewma` in ramp-down phases, and bigger in ramp-up
        phases.

        Those conditions are checked on a single execution of a task which has
        three main behaviours:

            * STABLE: periodic big task running for a relatively long period to
              ensure `util` saturation.
            * DOWN: periodic ramp-down task, to slowly decay `util`
            * UP: periodic ramp-up task, to slowly increase `util`

        """
        metrics = {}
        task = self.rtapp_task_ids_map['test'][0]

        # Get list of task's activations
        df = self.trace.ana.tasks.df_task_states(task)
        activations = df[(df.curr_state == TaskState.TASK_WAKING)
                         & (df.next_state == TaskState.TASK_ACTIVE)].index

        # Check task signals at each activation
        df = self.trace.df_event('sched_util_est_se')
        df = df_filter_task_ids(df, [task])

        for idx, activation in enumerate(activations):

            # Get the value of signals at their first update after the activation
            row = df_window(df, (activation, None), method='post').iloc[0]
            # It can happen that the first update after the activation is
            # actually in the next phase, in which case we need to check the
            # util values against the right phase
            activation = row.name

            # If we are outside a phase, ignore the activation
            try:
                phase = self.trace.ana.rta.task_phase_at(
                    task, activation, wlgen_profile=self.rtapp_profile)
            except KeyError:
                continue

            util = row['util']
            enq = row['enqueued']
            ewma = row['ewma']

            def make_issue(msg):
                return msg.format(
                    util=f'util={util}',
                    enq=f'enqueued={enq}',
                    ewma=f'ewma={ewma}',
                )

            issue = None

            # UtilEst is not updated when within 1% of previous activation
            if 1.01 * enq < util:
                issue = make_issue('{enq} smaller than {util}')

            # Running on FastRamp kernels:
            elif self.fast_ramp:

                # ewma stable, down and up
                if enq > ewma:
                    issue = make_issue('{enq} bigger than {ewma}')

            # Running on (legacy) non FastRamp kernels:
            else:
                if not phase.properties['meta']['from_test']:
                    continue

                # ewma stable
                if phase.id.startswith('test/stable'):
                    if enq < ewma:
                        issue = make_issue('stable: {enq} smaller than {ewma}')

                # ewma ramping down
                elif phase.id.startswith('test/ramp_down'):
                    if enq > ewma:
                        issue = make_issue(
                            'ramp down: {enq} bigger than {ewma}')

                # ewma ramping up
                elif phase.id.startswith('test/ramp_up'):
                    if enq < ewma:
                        issue = make_issue(
                            'ramp up: {enq} smaller than {ewma}')

            metrics[idx] = ActivationSignals(activation, util, enq, ewma,
                                             issue)

        failures = [(idx, activation_signals)
                    for idx, activation_signals in metrics.items()
                    if activation_signals.issue]

        bundle = ResultBundle.from_bool(not failures)
        bundle.add_metric("failures",
                          sorted(idx for idx, activation in failures))
        bundle.add_metric("activations", metrics)

        failures_time = [activation.time for idx, activation in failures]
        self._plot_signals(task, 'activations', failures_time)
        return bundle
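A quick numeric check of the 1% tolerance used above: `enqueued` may lag `util` slightly without the activation being flagged:

    util, enq = 512, 508
    assert not (1.01 * enq < util)  # 513.08 >= 512, so no issue is raised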
Example #10
    def get_expected_cpu_util(self):
        """
        Get the per-phase average CPU utilization expected from the duty cycle
        of the tasks found in the trace.

        :returns: A dict of the shape {cpu : {phase_id : expected_util}}

        .. note:: This is more robust than just looking at the duty cycle in
            the task profile, since rtapp might not accurately reproduce the
            duty cycle it was asked for.
        """
        cpu_capacities = self.plat_info['cpu-capacities']['rtapp']
        cpu_util = {}
        cpu_freqs = self.plat_info['freqs']

        try:
            freq_df = self.trace.analysis.frequency.df_cpus_frequency()
        except MissingTraceEventError:
            cpus_rel_freq = None
        else:
            cpus_rel_freq = {
                # Frequency, normalized according to max frequency on that CPU
                cols['cpu']: df['frequency'] / max(cpu_freqs[cols['cpu']])
                for cols, df in df_split_signals(freq_df, ['cpu'])
            }

        for task in self.rtapp_task_ids:
            df = self.trace.analysis.tasks.df_task_activation(task)

            for row in self.trace.analysis.rta.df_phases(task).itertuples():
                phase = row.phase
                duration = row.duration
                start = row.Index
                end = start + duration
                # The first quarter of the util signal of each phase could be
                # ignored, since it's impacted by the phase change and by the
                # bookkeeping rtapp does at the beginning of phases, but that
                # adjustment is currently disabled:
                # start += duration / 4

                # Readjust the duration to take into account any modification
                # of start
                duration = end - start
                window = (start, end)
                phase_df = df_window(df, window, clip_window=True)

                for cpu in self.cpus:

                    if cpus_rel_freq is None:
                        rel_freq_mean = 1
                    else:
                        phase_freq_series = df_window(cpus_rel_freq[cpu],
                                                      window=window,
                                                      clip_window=True)
                        # We might not have frequency data at the beginning of
                        # the trace, or if no frequency transition happened at
                        # all.
                        if phase_freq_series.empty:
                            rel_freq_mean = 1
                        else:
                            # If we lack freq data at the beginning of the
                            # window, assume the frequency was right.
                            if phase_freq_series.index[0] > start:
                                phase_freq_series = pd.concat([
                                    pd.Series([1.0], index=[start]),
                                    phase_freq_series
                                ])

                            # Extend the frequency to the right so that the mean
                            # takes into account all the data we have
                            freq_window = (phase_freq_series.index[0], end)
                            rel_freq_mean = series_mean(
                                series_refit_index(phase_freq_series,
                                                   window=freq_window))

                    cpu_phase_df = phase_df[phase_df['cpu'] == cpu].dropna()
                    if cpu_phase_df.empty:
                        duty_cycle = 0
                        cpu_residency = 0
                    else:
                        duty_cycle = series_mean(
                            df_refit_index(cpu_phase_df['duty_cycle'],
                                           window=window))
                        cpu_residency = end - max(cpu_phase_df.index[0], start)

                    phase_util = UTIL_SCALE * duty_cycle * (
                        cpu_capacities[cpu] / UTIL_SCALE)
                    # Pro-rata with the time spent on that CPU, so we get
                    # the correct average.
                    phase_util *= cpu_residency / duration

                    # We might not have run at max freq, e.g. because of
                    # thermal capping, so take that into account
                    phase_util *= rel_freq_mean

                    cpu_util.setdefault(cpu, {}).setdefault(phase, 0)
                    cpu_util[cpu][phase] += phase_util

        return cpu_util
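A worked numeric example of the expected-util formula above, with made-up values:

    UTIL_SCALE = 1024
    duty_cycle = 0.5       # task ran 50% of the phase
    capacity = 512         # rtapp capacity of this CPU
    cpu_residency = 0.8    # seconds spent on this CPU
    duration = 1.0         # phase duration in seconds
    rel_freq_mean = 0.9    # average frequency relative to max

    phase_util = UTIL_SCALE * duty_cycle * (capacity / UTIL_SCALE)
    phase_util *= cpu_residency / duration  # pro-rata with CPU residency
    phase_util *= rel_freq_mean             # account for running below max freq
    print(phase_util)                       # ~184.32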
Example #11
def cpus_of_phase_at(t):
    end = t + phases_df['duration'][t]
    window = (t, end)
    df = df_window(states_df, window, method='pre')
    return sorted(int(x) for x in df['cpu'].unique())
Example #12
    def test_activations(self) -> ResultBundle:
        """
        Test signals are properly "aggregated" at enqueue/dequeue time.

        On fast-ramp systems, `enqueued` is expected to be always
        smaller than `ewma`.

        On non fast-ramp systems, `enqueued` is expected to be
        smaller than `ewma` in ramp-down phases, and bigger in ramp-up
        phases.

        Those conditions are checked on a single execution of a task which has
        three main behaviours:

            * STABLE: periodic big task running for a relatively long period to
              ensure `util` saturation.
            * DOWN: periodic ramp-down task, to slowly decay `util`
            * UP: periodic ramp-up task, to slowly increase `util`

        """
        metrics = {}

        task = self.rtapp_task_ids_map['test'][0]

        # Get list of task's activations
        df = self.trace.analysis.tasks.df_task_states(task)
        activations = df[(df.curr_state == TaskState.TASK_WAKING)
                         & (df.next_state == TaskState.TASK_ACTIVE)].index

        # Check task signals at each activation
        df = self.trace.df_event('sched_util_est_se')
        df = df_filter_task_ids(df, [task])

        for idx, activation in enumerate(activations):
            # Get the value of signals at their first update after the activation
            row = df_window(df, (activation, None), method='post').iloc[0]
            util = row['util']
            enq = row['enqueued']
            ewma = row['ewma']

            def make_issue(msg):
                return msg.format(
                    util='util={}'.format(util),
                    enq='enqueued={}'.format(enq),
                    ewma='ewma={}'.format(ewma),
                )

            issue = None

            # UtilEst is not updated when within 1% of previous activation
            if 1.01 * enq < util:
                issue = make_issue('{enq} smaller than {util}')

            # Running on FastRamp kernels:
            elif self.fast_ramp:

                # ewma stable, down and up
                if enq > ewma:
                    issue = make_issue('{enq} bigger than {ewma}')

            # Running on (legacy) non FastRamp kernels:
            else:

                phase = self.trace.analysis.rta.task_phase_at(task, activation)
                # TODO: remove that once we have named phases to skip the buffer phase
                if phase.id == 0:
                    continue

                # ewma stable
                if phase.id == 1 and enq < ewma:
                    issue = make_issue('stable: {enq} smaller than {ewma}')

                # ewma ramping down
                elif phase.id <= 5 and enq > ewma:
                    issue = make_issue('ramp down: {enq} bigger than {ewma}')

                # ewma ramping up
                elif phase.id >= 6 and enq < ewma:
                    issue = make_issue('ramp up: {enq} smaller than {ewma}')

            metrics[idx] = ActivationSignals(activation, util, enq, ewma,
                                             issue)

        failures = [(idx, activation_signals)
                    for idx, activation_signals in metrics.items()
                    if activation_signals.issue]

        bundle = ResultBundle.from_bool(not failures)
        bundle.add_metric("failures",
                          sorted(idx for idx, activation in failures))
        bundle.add_metric("activations", metrics)

        failures_time = [activation.time for idx, activation in failures]
        self._plot_signals(task, 'activations', failures_time)
        return bundle
Example #13
    def _df_tasks_states(self, tasks=None, return_one_df=False):
        """
        Compute tasks states for all tasks.

        :param tasks: If specified, states of these tasks only will be yielded.
            The :class:`lisa.trace.TaskID` must have a ``pid`` field specified,
            since the task state is per-PID.
        :type tasks: list(lisa.trace.TaskID) or list(int)

        :param return_one_df: If ``True``, a single dataframe is returned with
            new extra columns. If ``False``, a generator is returned that
            yields tuples of ``(TaskID, task_df)``. Each ``task_df`` contains
            the new columns.
        :type return_one_df: bool
        """
        ######################################################
        # A) Assemble the sched_switch and sched_wakeup events
        ######################################################

        def get_df(event):
            # Ignore the end of the window so we can properly compute the
            # durations
            return self.trace.df_event(event, window=(self.trace.start, None))

        def filters_comm(task):
            try:
                return task.comm is not None
            except AttributeError:
                return isinstance(task, str)

        # Add the rename events if we are interested in the comm of tasks
        add_rename = any(map(filters_comm, tasks or []))

        wk_df = get_df('sched_wakeup')
        sw_df = get_df('sched_switch')

        try:
            wkn_df = get_df('sched_wakeup_new')
        except MissingTraceEventError:
            pass
        else:
            wk_df = pd.concat([wk_df, wkn_df])

        wk_df = wk_df[["pid", "comm", "target_cpu", "__cpu"]].copy(deep=False)
        wk_df["curr_state"] = TaskState.TASK_WAKING

        prev_sw_df = sw_df[["__cpu", "prev_pid", "prev_state", "prev_comm"]].copy()
        next_sw_df = sw_df[["__cpu", "next_pid", "next_comm"]].copy()

        prev_sw_df.rename(
            columns={
                "prev_pid": "pid",
                "prev_state": "curr_state",
                "prev_comm": "comm",
            },
            inplace=True
        )

        next_sw_df["curr_state"] = TaskState.TASK_ACTIVE
        next_sw_df.rename(columns={'next_pid': 'pid', 'next_comm': 'comm'}, inplace=True)

        all_sw_df = pd.concat([prev_sw_df, next_sw_df], sort=False)

        if add_rename:
            rename_df = get_df('task_rename').rename(
                columns={
                    'oldcomm': 'comm',
                },
            )[['pid', 'comm']]
            rename_df['curr_state'] = TaskState.TASK_RENAMED
            all_sw_df = pd.concat([all_sw_df, rename_df], sort=False)

        # Integer values are preferred here, otherwise the whole column
        # is converted to float64
        all_sw_df['target_cpu'] = -1

        df = pd.concat([all_sw_df, wk_df], sort=False)
        df.sort_index(inplace=True)
        df.rename(columns={'__cpu': 'cpu'}, inplace=True)

        # Restrict the set of data we will process to a given set of tasks
        if tasks is not None:
            def resolve_task(task):
                """
                Get a TaskID for each task, and only update existing TaskID if
                they lack a PID field, since that's what we care about in that
                function.
                """
                try:
                    do_update = task.pid is None
                except AttributeError:
                    do_update = False

                return self.trace.get_task_id(task, update=do_update)

            tasks = list(map(resolve_task, tasks))
            df = df_filter_task_ids(df, tasks)

        df = df_window(df, window=self.trace.window)

        # Return a unique dataframe with new columns added
        if return_one_df:
            df.sort_index(inplace=True)
            df.index.name = 'Time'
            df.reset_index(inplace=True)

            # Since sched_switch is split into two dataframes (next and prev),
            # we end up with duplicated indices. Avoid that by incrementing
            # them by the minimum amount possible.
            df = df_update_duplicates(df, col='Time', inplace=True)

            grouped = df.groupby('pid', observed=True, sort=False)
            new_columns = dict(
                next_state=grouped['curr_state'].shift(-1, fill_value=TaskState.TASK_UNKNOWN),
                # GroupBy.transform() will run the function on each group, and
                # concatenate the resulting series to create a new column.
                # Note: We actually need transform() to chain 2 operations on
                # the group, otherwise the first operation returns a final
                # Series, and the 2nd is not applied on groups
                delta=grouped['Time'].transform(lambda time: time.diff().shift(-1)),
            )
            df = df.assign(**new_columns)
            df.set_index('Time', inplace=True)

            return df

        # Return a generator yielding (TaskID, task_df) tuples
        else:
            def make_pid_df(pid_df):
                # Even though the initial dataframe contains duplicated indices due to
                # using both prev_pid and next_pid in sched_switch event, we should
                # never end up with prev_pid == next_pid, so task-specific dataframes
                # are expected to be free from duplicated timestamps.
                # assert not df.index.duplicated().any()

                # Copy the df to add new columns
                pid_df = pid_df.copy(deep=False)

                # For each PID, add the time it spent in each state
                pid_df['delta'] = pid_df.index.to_series().diff().shift(-1)
                pid_df['next_state'] = pid_df['curr_state'].shift(-1, fill_value=TaskState.TASK_UNKNOWN)
                return pid_df

            signals = df_split_signals(df, ['pid'])
            return (
                (TaskID(pid=col['pid'], comm=None), make_pid_df(pid_df))
                for col, pid_df in signals
            )
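The delta/next_state idiom shared by both branches can be illustrated with plain pandas on a toy per-task dataframe:

    import pandas as pd

    times = pd.Index([0.0, 0.5, 1.2], name='Time')
    pid_df = pd.DataFrame({'curr_state': ['W', 'A', 'S']}, index=times)
    # Time spent in each state: gap to the next event
    pid_df['delta'] = pid_df.index.to_series().diff().shift(-1)
    # State entered after this one; the last row has no successor
    pid_df['next_state'] = pid_df['curr_state'].shift(-1, fill_value='U')
    print(pid_df)  # delta: 0.5, 0.7, NaN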