Example #1
0
File: base.py Project: rousya/lisa
    def get_trace(self, **kwargs):
        """
        Build a :class:`lisa.trace.TraceView` restricted to the window
        covering the ``rt-app`` tasks.

        :Keyword arguments: forwarded to :class:`lisa.trace.Trace`.
        """
        full_trace = Trace(self.trace_path, self.plat_info, **kwargs)
        # Crop the full trace to the rt-app tasks' window
        window = self.trace_window(full_trace)
        return full_trace.get_view(window)
Example #2
0
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     # Synthetic sched_wakeup events fed through the mock parser
     columns = ('Time', '__cpu', '__pid', '__comm', 'comm', 'pid', 'prio',
                'target_cpu')
     records = [
         (0, 1, 1, 'task1', 'task1', 1, 1, 1),
         (1, 2, 1, 'task1', 'task1', 1, 1, 2),
         (2, 4, 2, 'task2', 'task2', 2, 1, 4),
     ]
     wakeup_df = pd.DataFrame.from_records(records, columns=columns,
                                           index='Time')
     dfs = {'sched_wakeup': wakeup_df}
     self.trace = Trace(parser=MockTraceParser(dfs, time_range=(0, 42)))
Example #3
0
    def get_trace(self, **kwargs):
        """
        Load the trace collected in the standard location.

        :returns: a :class:`lisa.trace.Trace`.

        :Variable keyword arguments: Forwarded to :class:`lisa.trace.Trace`.
        """
        trace = Trace(self.trace_path, self.plat_info, **kwargs)
        return trace
Example #4
0
class TestMockTraceParser(TestCase):
    """
    Exercise :class:`MockTraceParser` through the :class:`Trace` API.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Synthetic sched_wakeup events served by the mock parser
        columns = ('Time', '__cpu', '__pid', '__comm', 'comm', 'pid', 'prio',
                   'target_cpu')
        rows = [
            (0, 1, 1, 'task1', 'task1', 1, 1, 1),
            (1, 2, 1, 'task1', 'task1', 1, 1, 2),
            (2, 4, 2, 'task2', 'task2', 2, 1, 4),
        ]
        dfs = {
            'sched_wakeup': pd.DataFrame.from_records(rows, columns=columns,
                                                      index='Time'),
        }
        self.trace = Trace(parser=MockTraceParser(dfs, time_range=(0, 42)))

    def test_df_event(self):
        # The mocked event must be exposed with its columns intact
        df = self.trace.df_event('sched_wakeup')
        assert not df.empty
        assert 'target_cpu' in df.columns

    def test_time_range(self):
        # time_range passed to the parser defines the trace boundaries
        assert self.trace.start == 0
        assert self.trace.end == 42
Example #5
0
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Keep absolute timestamps rather than normalizing them to 0
        self.trace = Trace(
            self.trace_path,
            self.plat_info,
            self.events,
            normalize_time=False,
        )
Example #6
0
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        traces_dir = self.traces_dir
        self.test_trace = os.path.join(traces_dir, 'test_trace.txt')
        self.plat_info = self._get_plat_info()
        self.trace_path = os.path.join(traces_dir, 'trace.txt')
        self.trace = Trace(self.trace_path, self.plat_info, self.events)
Example #7
0
 def get_trace(self, trace_name):
     """
     Load the ``trace.dat`` of the given named trace directory.
     """
     path = os.path.join(self.traces_dir, trace_name, 'trace.dat')
     plat_info = self._get_plat_info(trace_name)
     return Trace(path, plat_info=plat_info, events=self.events)
Example #8
0
    def make_trace(self, in_data):
        """
        Write ``in_data`` to a temporary text trace file and parse it.
        """
        trace_path = os.path.join(self.res_dir, "test_trace.txt")
        with open(trace_path, "w") as fout:
            fout.write(in_data)

        return Trace(
            trace_path,
            self.plat_info,
            self.events,
            normalize_time=False,
            plots_dir=self.res_dir,
        )
Example #9
0
    def test_time_range_subscript(self):
        # Subscripting a Trace crops it to the given time window
        expected_duration = 4.0

        begin, end = 76.402065, 80.402065
        full = Trace(
            self.trace_path,
            self.plat_info,
            self.events,
            normalize_time=False,
        )
        trace = full[begin:end]

        self.assertAlmostEqual(trace.time_range, expected_duration)
Example #10
0
    def test_time_range(self):
        # get_view() must crop the trace to the requested window
        expected_duration = 4.0

        window = (76.402065, 80.402065)
        full = Trace(
            self.trace_path,
            self.plat_info,
            self.events,
            normalize_time=False,
        )
        trace = full.get_view(window)

        self.assertAlmostEqual(trace.time_range, expected_duration)
Example #11
0
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Keep raw (non-normalized) timestamps and force the text parser
        self.trace = Trace(
            self.trace_path,
            plat_info=self.plat_info,
            events=self.events,
            parser=TxtTraceParser.from_txt_file,
            normalize_time=False,
        )
Example #12
0
    def test_time_range(self):
        """
        TestTrace: time_range is the duration of the trace
        """
        expected_duration = 6.676497

        trace = Trace(
            self.trace_path,
            self.plat_info,
            self.events,
            normalize_time=False,
        )
        duration = trace.time_range

        self.assertAlmostEqual(duration, expected_duration)
Example #13
0
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Parse the full trace, then keep a view spanning its whole
        # [start, end] range.
        full = Trace(
            self.trace_path,
            plat_info=self.plat_info,
            events=self.events,
            normalize_time=False,
            parser=TxtTraceParser.from_txt_file,
        )
        self.trace = full[full.start:full.end]
Example #14
0
    def test_time_range_subscript(self):
        # Subscripting crops the trace to the given window
        expected_duration = 4.0

        begin, end = 76.402065, 80.402065
        full = Trace(
            self.trace_path,
            plat_info=self.plat_info,
            events=self.events,
            normalize_time=False,
            parser=TxtTraceParser.from_txt_file,
        )
        trace = full[begin:end]

        assert trace.time_range == pytest.approx(expected_duration)
Example #15
0
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        traces_dir = self.traces_dir
        self.test_trace = os.path.join(traces_dir, 'test_trace.txt')
        self.plat_info = self._get_plat_info()
        self.trace_path = os.path.join(traces_dir, 'trace.txt')
        self.trace = Trace(
            self.trace_path,
            plat_info=self.plat_info,
            events=self.events,
            parser=TxtTraceParser.from_txt_file,
        )
Example #16
0
    def test_time_range(self):
        """
        TestTrace: time_range is the duration of the trace
        """
        expected_duration = 6.676497

        trace = Trace(
            self.trace_path,
            plat_info=self.plat_info,
            events=self.events,
            parser=TxtTraceParser.from_txt_file,
            normalize_time=False,
        )
        duration = trace.time_range

        assert duration == pytest.approx(expected_duration)
Example #17
0
    def test_estimate_from_trace(self):
        """
        Check ``em.estimate_from_trace()`` against a hand-written trace.

        A synthetic textual trace drives the CPUs through frequency and
        idle-state transitions; the estimated power of each energy-model
        node is then compared to hand-computed values at each stable point.
        """
        trace_data = (
            # Set all CPUs at lowest freq
            """
            <idle>-0  [000] 0000.0001: cpu_frequency:   state=1000 cpu_id=0
            <idle>-0  [000] 0000.0001: cpu_frequency:   state=1000 cpu_id=1
            <idle>-0  [000] 0000.0001: cpu_frequency:   state=3000 cpu_id=2
            <idle>-0  [000] 0000.0001: cpu_frequency:   state=3000 cpu_id=3
            """  # Set all CPUs in deepest CPU-level idle state
            """
            <idle>-0  [000] 0000.0002: cpu_idle:        state=1 cpu_id=0
            <idle>-0  [000] 0000.0002: cpu_idle:        state=1 cpu_id=1
            <idle>-0  [000] 0000.0002: cpu_idle:        state=1 cpu_id=2
            <idle>-0  [000] 0000.0002: cpu_idle:        state=1 cpu_id=3
            """  # Wake up cpu 0
            """
            <idle>-0  [000] 0000.0005: cpu_idle:        state=4294967295 cpu_id=0
            """  # Ramp up everybody's freqs to 2nd OPP
            """
            <idle>-0  [000] 0000.0010: cpu_frequency:   state=1500 cpu_id=0
            <idle>-0  [000] 0000.0010: cpu_frequency:   state=1500 cpu_id=1
            <idle>-0  [000] 0000.0010: cpu_frequency:   state=4000 cpu_id=2
            <idle>-0  [000] 0000.0010: cpu_frequency:   state=4000 cpu_id=3
            """  # Wake up the other CPUs one by one
            """
            <idle>-0  [000] 0000.0011: cpu_idle:        state=4294967295 cpu_id=1
            <idle>-0  [000] 0000.0012: cpu_idle:        state=4294967295 cpu_id=2
            <idle>-0  [000] 0000.0013: cpu_idle:        state=4294967295 cpu_id=3
            """  # Put CPU2 into "cluster sleep" (note CPU3 is still awake)
            """
            <idle>-0  [000] 0000.0020: cpu_idle:        state=2 cpu_id=2
            """
        )

        # Keep the temporary folder alive until the energy estimation has
        # actually run: Trace may parse the file lazily, so removing the
        # folder beforehand (as the old code did) could make
        # estimate_from_trace() read a deleted file. The try/finally also
        # guarantees cleanup when open()/Trace() raise, and avoids shadowing
        # the dir() builtin.
        tmp_dir = mkdtemp()
        try:
            path = os.path.join(tmp_dir, 'trace.txt')
            with open(path, 'w') as f:
                f.write(trace_data)

            trace = Trace(path,
                          events=['cpu_idle', 'cpu_frequency'],
                          normalize_time=False)

            energy_df = em.estimate_from_trace(trace)
        finally:
            shutil.rmtree(tmp_dir)

        exp_entries = [
            # Everybody idle
            (0.0002, {
                '0': 0.0,
                '1': 0.0,
                '2': 0.0,
                '3': 0.0,
                '0-1': 5.0,
                '2-3': 8.0,
            }),
            # CPU0 wakes up
            (0.0005, {
                '0': 100.0,  # CPU 0 now active
                '1': 0.0,
                '2': 0.0,
                '3': 0.0,
                '0-1': 10.0,  # little cluster now active
                '2-3': 8.0,
            }),
            # Ramp freqs up to 2nd OPP
            (0.0010, {
                '0': 150.0,
                '1': 0.0,
                '2': 0.0,
                '3': 0.0,
                '0-1': 15.0,
                '2-3': 8.0,
            }),
            # Wake up CPU1
            (0.0011, {
                '0': 150.0,
                '1': 150.0,
                '2': 0.0,
                '3': 0.0,
                '0-1': 15.0,
                '2-3': 8.0,
            }),
            # Wake up CPU2
            (0.0012, {
                '0': 150.0,
                '1': 150.0,
                '2': 400.0,
                '3': 0.0,
                '0-1': 15.0,
                '2-3': 40.0,  # big cluster now active
            }),
            # Wake up CPU3
            (0.0013, {
                '0': 150.0,
                '1': 150.0,
                '2': 400.0,
                '3': 400.0,
                '0-1': 15.0,
                '2-3': 40.0,
            }),
        ]

        # We don't know the exact index that will come out of the parsing
        # (because of handle_duplicate_index). Furthermore the value of the
        # energy estimation will change for infinitessimal moments between each
        # cpu_frequency event, and we don't care about that - we only care about
        # the stable value. So we'll take the value of the returned signal at
        # 0.01ms after each set of events, and assert based on that.
        df = energy_df.reindex([e[0] + 0.00001 for e in exp_entries],
                               method='ffill')

        for i, (exp_index, exp_values) in enumerate(exp_entries):
            row = df.iloc[i]
            self.assertAlmostEqual(row.name, exp_index, places=4)
            self.assertDictEqual(row.to_dict(), exp_values)
Example #18
0
def main(argv=None):
    """
    Entry point of the trace plotting CLI.

    Parses the command line, loads the trace (restricting the parsed events
    to what the selected plot methods need), optionally crops it to a user
    window, then renders every requested plot to a file or an interactive
    matplotlib window.

    :param argv: Command line arguments, defaults to ``sys.argv[1:]``.
    """
    if argv is None:
        argv = sys.argv[1:]

    # Map "nice" (user-facing) analysis names back to the internal names
    # used as keys of plots_map.
    plots_map = get_plots_map()
    analysis_nice_name_map = {
        get_analysis_nice_name(name): name
        for name in plots_map.keys()
    }

    parser = argparse.ArgumentParser(
        description="""
CLI for LISA analysis plots and reports from traces.

Available plots:

{}

""".format(get_analysis_listing(plots_map)),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )

    parser.add_argument(
        'trace',
        help='trace-cmd trace.dat, or systrace file',
    )

    parser.add_argument(
        '--normalize-time',
        action='store_true',
        help=
        'Normalize the time in the plot, i.e. start at 0 instead of uptime timestamp',
    )

    parser.add_argument(
        '--plot',
        nargs=2,
        action='append',
        default=[],
        metavar=('PLOT', 'OUTPUT_PATH'),
        help=
        'Create the given plot. If OUTPUT_PATH is "interactive", an interactive window will be used',
    )

    parser.add_argument(
        '--plot-analysis',
        nargs=3,
        action='append',
        default=[],
        metavar=('ANALYSIS', 'OUTPUT_FOLDER_PATH', 'FORMAT'),
        help='Create all the plots of the given analysis',
    )

    parser.add_argument(
        '--plot-all',
        nargs=2,
        metavar=('OUTPUT_FOLDER_PATH', 'FORMAT'),
        help='Create all the plots in the given folder',
    )

    parser.add_argument(
        '--best-effort',
        action='store_true',
        help=
        'Try to generate as many of the requested plots as possible without early termination.',
    )

    parser.add_argument(
        '--window',
        nargs=2,
        type=float,
        metavar=('BEGIN', 'END'),
        help='Only plot data between BEGIN and END times',
    )

    parser.add_argument(
        '-X',
        '--option',
        nargs=2,
        action='append',
        default=[],
        metavar=('OPTION_NAME', 'VALUE'),
        help=
        'Pass extra parameters to plot methods, e.g. "-X cpu 1". Mismatching names are ignored.',
    )

    parser.add_argument(
        '--matplotlib-backend',
        default='GTK3Agg',
        help='matplotlib backend to use for interactive window',
    )

    parser.add_argument(
        '--plat-info',
        help='Platform information, necessary for some plots',
    )

    parser.add_argument(
        '--xkcd',
        action='store_true',
        help='Graphs will look like XKCD plots',
    )

    args = parser.parse_args(argv)

    # Flatten {analysis: {plot_name: meth}} into {plot_name: meth} for
    # direct lookup by plot name.
    flat_plot_map = {
        plot_name: meth
        for analysis_name, plot_list in plots_map.items()
        for plot_name, meth in plot_list.items()
    }

    if args.plat_info:
        plat_info = PlatformInfo.from_yaml_map(args.plat_info)
    else:
        plat_info = None

    # --plot-all expands into one --plot-analysis entry per known analysis,
    # all sharing the same output folder and format.
    if args.plot_all:
        folder, fmt = args.plot_all
        plot_analysis_spec_list = [(get_analysis_nice_name(analysis_name),
                                    folder, fmt)
                                   for analysis_name in plots_map.keys()]
    else:
        plot_analysis_spec_list = []

    plot_analysis_spec_list.extend(args.plot_analysis)

    # Expand each analysis spec into (plot_name, output_file) pairs, one
    # file per plot, named "<plot>.<fmt>" inside the chosen folder.
    plot_spec_list = [(plot_name,
                       os.path.join(folder, '{}.{}'.format(plot_name, fmt)))
                      for analysis_name, folder, fmt in plot_analysis_spec_list
                      for plot_name, meth in plots_map[
                          analysis_nice_name_map[analysis_name]].items()]

    plot_spec_list.extend(args.plot)

    # Build minimal event list to speed up trace loading time
    plot_methods = set()
    for plot_name, file_path in plot_spec_list:
        try:
            f = flat_plot_map[plot_name]
        except KeyError:
            error('Unknown plot "{}", see --help'.format(plot_name))
            # NOTE(review): this assumes error() terminates the process;
            # if it returns, 'f' below may be stale or unbound — confirm.

        plot_methods.add(f)

    # If best effort is used, we don't want to trigger exceptions ahead of
    # time. Let it fail for individual plot methods instead, so the trace can
    # be used for the other events
    if args.best_effort:
        events = None
    else:
        events = set()
        for f in plot_methods:
            with contextlib.suppress(AttributeError):
                events.update(f.used_events.get_all_events())

        events = sorted(events)
        print('Parsing trace events: {}'.format(', '.join(events)))

    # Load the trace, parsing only the events needed by the selected plots.
    trace = Trace(args.trace,
                  plat_info=plat_info,
                  events=events,
                  normalize_time=args.normalize_time,
                  write_swap=True)
    if args.window:
        window = args.window

        def clip(l, x, r):
            """Clamp ``x`` into the closed interval [``l``, ``r``]."""
            if x < l:
                return l
            elif x > r:
                return r
            else:
                return x

        # Clamp the user window inside the trace's own time span.
        window = (
            clip(trace.window[0], window[0], trace.window[1]),
            clip(trace.window[0], window[1], trace.window[1]),
        )
        # There is no overlap between trace and user window, reset to trace
        # window
        if window[0] == window[1]:
            print(
                'Window {} does not overlap with trace time range, maybe you forgot --normalize-time ?'
                .format(tuple(args.window)))
            window = trace.window

        trace = trace.get_view(window)

    # Render each requested plot, either to a file or interactively.
    for plot_name, file_path in sorted(plot_spec_list):
        interactive = file_path == 'interactive'
        f = flat_plot_map[plot_name]
        if interactive:
            matplotlib.use(args.matplotlib_backend)
            file_path = None
        else:
            dirname = os.path.dirname(file_path)
            if dirname:
                os.makedirs(dirname, exist_ok=True)

        kwargs = make_plot_kwargs(f,
                                  file_path,
                                  interactive=interactive,
                                  extra_options=args.option)

        xkcd_cm = plt.xkcd() if args.xkcd else nullcontext()
        with handle_plot_excep(exit_on_error=not args.best_effort):
            with xkcd_cm:
                TraceAnalysisBase.call_on_trace(f, trace, kwargs)

        if interactive:
            plt.show()
Example #19
0
    def test_estimate_from_trace(self):
        """
        Check ``em.estimate_from_trace()`` against a synthetic textual trace.

        The trace drives the CPUs through frequency and idle-state
        transitions; the estimated power of each energy-model node is then
        compared to hand-computed expected values at each stable point.
        The trace file lives in a temporary directory, so it is parsed
        eagerly (``strict_events=True``) before the directory is removed.
        """
        trace_data = (
            # Set all CPUs at lowest freq
            """
            <idle>-0  [000] 0000.0001: cpu_frequency:   state=1000 cpu_id=0
            <idle>-0  [000] 0000.0001: cpu_frequency:   state=1000 cpu_id=1
            <idle>-0  [000] 0000.0001: cpu_frequency:   state=3000 cpu_id=2
            <idle>-0  [000] 0000.0001: cpu_frequency:   state=3000 cpu_id=3
            """

            # Set all CPUs in deepest CPU-level idle state
            """
            <idle>-0  [000] 0000.0002: cpu_idle:        state=1 cpu_id=0
            <idle>-0  [000] 0000.0002: cpu_idle:        state=1 cpu_id=1
            <idle>-0  [000] 0000.0002: cpu_idle:        state=1 cpu_id=2
            <idle>-0  [000] 0000.0002: cpu_idle:        state=1 cpu_id=3
            """

            # Wake up cpu 0
            """
            <idle>-0  [000] 0000.0005: cpu_idle:        state=4294967295 cpu_id=0
            """

            # Ramp up everybody's freqs to 2nd OPP
            """
            <idle>-0  [000] 0000.0010: cpu_frequency:   state=1500 cpu_id=0
            <idle>-0  [000] 0000.0010: cpu_frequency:   state=1500 cpu_id=1
            <idle>-0  [000] 0000.0010: cpu_frequency:   state=4000 cpu_id=2
            <idle>-0  [000] 0000.0010: cpu_frequency:   state=4000 cpu_id=3
            """

            # Wake up the other CPUs one by one
            """
            <idle>-0  [000] 0000.0011: cpu_idle:        state=4294967295 cpu_id=1
            <idle>-0  [000] 0000.0012: cpu_idle:        state=4294967295 cpu_id=2
            <idle>-0  [000] 0000.0013: cpu_idle:        state=4294967295 cpu_id=3
            """

            # Put CPU2 into "cluster sleep" (note CPU3 is still awake)
            """
            <idle>-0  [000] 0000.0020: cpu_idle:        state=2 cpu_id=2
            """)

        with tempfile.TemporaryDirectory() as directory:
            path = os.path.join(directory, 'trace.txt')
            with open(path, 'w') as f:
                f.write(trace_data)

            trace = Trace(
                path,
                events=['cpu_idle', 'cpu_frequency'],
                normalize_time=False,
                # Disable swap since the folder is going to get removed
                enable_swap=False,
                # Parse all the events eagerly since the trace file is going to
                # be removed.
                strict_events=True,
                parser=TxtTraceParser.from_txt_file,
            )

        # Safe after the directory's removal: events were parsed eagerly above.
        energy_df = em.estimate_from_trace(trace)

        exp_entries = [
            # Everybody idle
            (0.0002, {
                '0': 0.0,
                '1': 0.0,
                '2': 0.0,
                '3': 0.0,
                '0-1': 5.0,
                '2-3': 8.0,
            }),
            # CPU0 wakes up
            (
                0.0005,
                {
                    '0': 100.0,  # CPU 0 now active
                    '1': 0.0,
                    '2': 0.0,
                    '3': 0.0,
                    '0-1': 10.0,  # little cluster now active
                    '2-3': 8.0,
                }),
            # Ramp freqs up to 2nd OPP
            (0.0010, {
                '0': 150.0,
                '1': 0.0,
                '2': 0.0,
                '3': 0.0,
                '0-1': 15.0,
                '2-3': 8.0,
            }),
            # Wake up CPU1
            (0.0011, {
                '0': 150.0,
                '1': 150.0,
                '2': 0.0,
                '3': 0.0,
                '0-1': 15.0,
                '2-3': 8.0,
            }),
            # Wake up CPU2
            (
                0.0012,
                {
                    '0': 150.0,
                    '1': 150.0,
                    '2': 400.0,
                    '3': 0.0,
                    '0-1': 15.0,
                    '2-3': 40.0,  # big cluster now active
                }),
            # Wake up CPU3
            (0.0013, {
                '0': 150.0,
                '1': 150.0,
                '2': 400.0,
                '3': 400.0,
                '0-1': 15.0,
                '2-3': 40.0,
            }),
        ]

        # We don't know the exact index that will come out of the parsing
        # (because of handle_duplicate_index). Furthermore the value of the
        # energy estimation will change for infinitessimal moments between each
        # cpu_frequency event, and we don't care about that - we only care about
        # the stable value. So we'll take the value of the returned signal at
        # 0.01ms after each set of events, and assert based on that.
        df = energy_df.reindex([e[0] + 0.00001 for e in exp_entries],
                               method='ffill')

        for i, (exp_index, exp_values) in enumerate(exp_entries):
            row = df.iloc[i]
            assert row.name == pytest.approx(exp_index, abs=1e-4)
            assert row.to_dict() == exp_values
Example #20
0
 def _get_artifact_df(self, path):
     """
     Parse the trace artifact at ``path`` and convert it to a dataframe.
     """
     return self._trace_to_df(Trace(path, **self._trace_kwargs))
Example #21
0
# Build a 4-task periodic rt-app workload, one task per CPU.
rtapp_profile = {}
tasks = []
for cpu in range(4):
    tasks.append("tsk{}-{}".format(cpu, cpu))
    rtapp_profile["tsk{}".format(cpu)] = Periodic(
        duty_cycle_pct=50,
        duration_s=120,
    )

wload = RTA.by_profile(target, "experiment_wload", rtapp_profile)

# Collect sched_switch events while the workload runs.
trace_path = os.path.join(wload.res_dir, "trace.dat")
ftrace_coll = FtraceCollector(target, events=["sched_switch"])
with ftrace_coll:
    wload.run()
ftrace_coll.get_trace(trace_path)

trace = Trace(trace_path, target.plat_info, events=["sched_switch"])

# sched_switch __comm  __pid  __cpu  __line prev_comm  prev_pid  prev_prio  prev_state next_comm  next_pid  next_prio
df = trace.df_events('sched_switch')[['next_pid', 'next_comm', '__cpu']]


def analize_task_migration(task_id, ddf):
    """
    Print the sched_switch activity on the source CPU around a migration.

    ``ddf`` holds the two switch events of the migration; the window spans
    from the first event to 1s past the second one, clamped to the end of
    the global ``df``.
    """
    start = ddf.index[0]
    stop = min(ddf.index[1] + 1.0, df.index[-1])
    src_cpu = ddf['__cpu'].values[0]
    dst_cpu = ddf['__cpu'].values[1]
    # Slice once, then keep only the events that ran on the source CPU
    window = df[start:stop]
    _df = window[window['__cpu'] == src_cpu]
    print("Task {} migrated from CPU {} to CPU {}\n".format(
        task_id, src_cpu, dst_cpu))
    print(_df.to_string(max_cols=64) + "\n")