Example #1
    def test_performance_sanity(self) -> ResultBundle:
        """
        Assert that a higher CPU frequency leads to more work being done
        """

        cpu_items = {
            cpu: {
                # We expect only one item per frequency
                item.freq: item
                for item in freq_items
            }
            for cpu, freq_items in groupby(self.sanity_items, key=lambda item: item.cpu)
        }

        failed = []
        for cpu, freq_items in cpu_items.items():
            sorted_items = sorted(freq_items.values(), key=lambda item: item.freq)
            work = [item.work for item in sorted_items]
            # The amount of work must not decrease as frequency increases
            if work != sorted(work):
                failed.append(cpu)

        res = ResultBundle.from_bool(not failed)
        work_metric = {
            cpu: {freq: item.work for freq, item in freq_items.items()}
            for cpu, freq_items in cpu_items.items()
        }
        res.add_metric('CPUs work', work_metric)
        res.add_metric('Failed CPUs', failed)

        return res
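
A note on `groupby` in these examples: plain `itertools.groupby` only merges
*adjacent* items, yet the examples feed it iterables with no visible sorting,
and Example #7 even passes `reverse=True`, which `itertools.groupby` does not
accept. They appear to rely on a sort-then-group wrapper (LISA ships one in
`lisa.utils`; the exact signature below is an assumption). A minimal sketch:

import itertools

def groupby(iterable, key=None, reverse=False):
    # Hypothetical stand-in for lisa.utils.groupby: sort first so that items
    # sharing a key become adjacent, which itertools.groupby requires in
    # order to yield exactly one group per distinct key value.
    return itertools.groupby(sorted(iterable, key=key, reverse=reverse),
                             key=key)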
Example #2
    def test_cpu_invariance(self) -> AggregatedResultBundle:
        """
        Check that the items using the max frequency on each CPU pass the util avg test.

        There could be false positives, but they are expected to be relatively
        rare.

        .. seealso:: :meth:`TaskInvariance.ITEM_CLS.test_util_behaviour`
        """
        res_list = []
        for cpu, item_group in groupby(self.invariance_items,
                                       key=lambda x: x.cpu):
            item_group = list(item_group)
            # Combine the frequency lists of all items of that CPU; they
            # should all be identical anyway
            max_freq = max(
                itertools.chain.from_iterable(x.freq_list for x in item_group))
            max_freq_items = [
                item for item in item_group if item.freq == max_freq
            ]
            for item in max_freq_items:
                # Only test util, as it should be more robust
                res = item.test_util_behaviour()
                res_list.append(res)

        return AggregatedResultBundle(res_list, 'cpu')
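
To make the max-frequency selection concrete, here is a self-contained sketch
with a hypothetical `Item` namedtuple standing in for the real invariance
items (fields and values are made up):

from collections import namedtuple
import itertools

Item = namedtuple('Item', ['cpu', 'freq', 'freq_list'])
items = [
    Item(cpu=0, freq=500, freq_list=[500, 1000]),
    Item(cpu=0, freq=1000, freq_list=[500, 1000]),
]
# Highest frequency across all items of the group
max_freq = max(itertools.chain.from_iterable(i.freq_list for i in items))
max_freq_items = [i for i in items if i.freq == max_freq]
assert [i.freq for i in max_freq_items] == [1000]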
Example #3
    def get_prebuilt_op_set(self):
        non_reusable_type_set = self.get_non_reusable_type_set()
        op_set = set()

        # Try to build as many configuration instances as possible from all
        # the files we are given
        conf_cls_set = set(get_subclasses(MultiSrcConf, only_leaves=True))
        conf_list = []
        for conf_path in self.args.conf:
            for conf_cls in conf_cls_set:
                try:
                    # Do not add the default source, to avoid overriding user
                    # configuration with the default one.
                    conf = conf_cls.from_yaml_map(conf_path,
                                                  add_default_src=False)
                except ValueError:
                    continue
                else:
                    conf_list.append((conf, conf_path))

        def keyfunc(conf_and_path):
            cls = type(conf_and_path[0])
            # We use the ID since classes are not comparable
            return id(cls), cls

        # Then aggregate all the confs of each type, so they just act as
        # alternative sources.
        for (_, conf_cls), conf_and_path_seq in groupby(conf_list,
                                                        key=keyfunc):
            conf_and_path_list = list(conf_and_path_seq)

            # Get the default configuration, and stack all user-defined keys
            conf = conf_cls()
            for conf_src, conf_path in conf_and_path_list:
                src = os.path.basename(conf_path)
                conf.add_src(src, conf_src)

            op_set.add(
                PrebuiltOperator(conf_cls, [conf],
                                 non_reusable_type_set=non_reusable_type_set))

        # Inject serialized objects as root operators
        for path in self.args.inject:
            obj = Serializable.from_path(path)
            op_set.add(
                PrebuiltOperator(type(obj), [obj],
                                 non_reusable_type_set=non_reusable_type_set))

        # Inject a dummy empty TargetConf
        if self.args.inject_empty_target_conf:
            op_set.add(
                PrebuiltOperator(TargetConf, [TargetConf(conf={})],
                                 non_reusable_type_set=non_reusable_type_set))

        return op_set
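
Why `keyfunc` returns `(id(cls), cls)`: classes define no ordering, so they
cannot be used directly as a sort/group key. The `id()` provides a sortable
value while the class object itself is carried along for later use. A small
sketch:

import itertools

class A: pass
class B: pass

def keyfunc(obj):
    # id() makes the key sortable; the class itself rides along.
    return id(type(obj)), type(obj)

objs = [A(), B(), A()]
for (_, cls), group in itertools.groupby(sorted(objs, key=keyfunc),
                                         key=keyfunc):
    print(cls.__name__, len(list(group)))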
Example #4
    def test_freq_invariance(self) -> ResultBundle:
        """
        Check that at least one CPU has items passing for all tested frequencies.

        .. seealso:: :meth:`InvarianceItem.test_task_util_avg`
        """

        logger = self.get_logger()

        def make_group_bundle(cpu, item_group):
            bundle = AggregatedResultBundle(
                [
                    # Only test util, as it should be more robust
                    item.test_task_util_avg() for item in item_group
                ],
                # each item's "cpu" metric also contains the frequency
                name_metric='cpu',
            )
            # At that level, we only report the CPU, since nested bundles cover
            # different frequencies
            bundle.add_metric('cpu', cpu)

            logger.info('Util avg invariance {res} for CPU {cpu}'.format(
                res=bundle.result.lower_name,
                cpu=cpu,
            ))
            return bundle

        group_result_bundles = [
            make_group_bundle(cpu, item_group)
            for cpu, item_group in groupby(self.invariance_items,
                                           key=lambda x: x.cpu)
        ]

        # The combination differs from the AggregatedResultBundle default one:
        # we consider the test passed as long as at least one of the groups
        # has passed, instead of requiring all of them to pass.
        if any(result_bundle.result is Result.PASSED
               for result_bundle in group_result_bundles):
            overall_result = Result.PASSED
        elif all(result_bundle.result is Result.UNDECIDED
                 for result_bundle in group_result_bundles):
            overall_result = Result.UNDECIDED
        else:
            overall_result = Result.FAILED

        return AggregatedResultBundle(group_result_bundles,
                                      name_metric='cpu',
                                      result=overall_result)
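
The result-combination policy above can be isolated as a small function. This
sketch uses a stand-in `Result` enum (member names follow the example; the
real class lives in LISA):

import enum

class Result(enum.Enum):
    PASSED = 'passed'
    FAILED = 'failed'
    UNDECIDED = 'undecided'

def combine(results):
    # Pass if at least one result passed, stay undecided only if all are
    # undecided, and fail otherwise.
    if any(r is Result.PASSED for r in results):
        return Result.PASSED
    elif all(r is Result.UNDECIDED for r in results):
        return Result.UNDECIDED
    else:
        return Result.FAILED

assert combine([Result.FAILED, Result.PASSED]) is Result.PASSED
assert combine([Result.UNDECIDED, Result.UNDECIDED]) is Result.UNDECIDED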
Example #5
def group_by_testcase(froz_val_list):
    return OrderedDict(
        (testcase_id, [froz_val.value for froz_val in froz_val_group])
        for testcase_id, froz_val_group in groupby(froz_val_list,
                                                   key=get_id))
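
Hypothetical usage, with made-up `FrozVal` objects and a stand-in `get_id`
(the real one comes from the surrounding module, which also provides the
`OrderedDict` and `groupby` imports used above):

from collections import namedtuple

FrozVal = namedtuple('FrozVal', ['testcase_id', 'value'])

def get_id(froz_val):
    # Stand-in for the module's real get_id()
    return froz_val.testcase_id

froz_vals = [FrozVal('test_a', 1), FrozVal('test_a', 2), FrozVal('test_b', 3)]
print(group_by_testcase(froz_vals))
# OrderedDict([('test_a', [1, 2]), ('test_b', [3])])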
Example #6
def make_changelog(repo):
    """
    Generate a reStructuredText changelog to be included in the documentation.

    .. note:: The git repository cannot be a shallow clone, as the changelog is
        extracted from the git history.
    """
    release_refs = ['HEAD'] + lisa._git.find_tags(repo, 'v*')

    def update_release_name(name):
        if name == 'HEAD':
            return 'Next release'
        else:
            return name

    MARKERS = ['FEATURE', 'FIX', 'BREAKING CHANGE']

    # Filtering on the patterns we look for provides a considerable speedup
    commit_pattern = '(' + '|'.join(map(re.escape, MARKERS)) + ')'
    release_sha1s = {
        update_release_name(y): lisa._git.find_commits(
            repo=repo,
            ref=f'{x}..{y}',
            grep=commit_pattern,
            regex=True,
        )
        for x, y in zip(release_refs[1:], release_refs)
    }

    release_msgs = {
        release: [
            lisa._git.get_commit_message(
                repo=repo,
                ref=ref,
                format='%B',
            ).strip() for ref in refs
        ]
        for release, refs in release_sha1s.items()
    }

    def parse_msg(msg):
        selected = tuple(
            sorted({marker
                    for marker in MARKERS if marker in msg}))
        for marker in selected:
            pattern = rf'^\s*{re.escape(marker)}\s*$'
            msg = re.sub(pattern, '', msg, flags=re.MULTILINE)

        return (msg, selected)

    def expand(msg, markers):
        for marker in markers:
            yield (marker, msg)

    release_msgs = {
        release: {
            marker: [msg for _, msg in entries]
            for marker, entries in groupby(
                (entry for msg in msgs for entry in expand(*parse_msg(msg))),
                key=itemgetter(0),
            )
        }
        for release, msgs in release_msgs.items()
    }

    def indent(level, content):
        idt = level * ' '
        return idt + content.replace('\n', f'\n{idt}')

    def format_release(name, sections):
        title = f'{name}\n{len(name) * "="}\n'
        body = '\n\n'.join(
            format_section(marker, _msgs) for marker, _msgs in order_as(
                sections.items(),
                order_as=MARKERS,
                key=itemgetter(0),
            ))

        return f'{title}\n{body}'

    def format_section(name, msgs):
        title = f'{name.capitalize()}\n{len(name) * "+"}\n'
        body = '\n\n'.join(map(format_msg, sorted(msgs)))
        body = indent(4, body)
        return f'{title}\n{body}'

    def format_msg(msg):
        subject = msg.splitlines()[0]
        return f'- {subject.strip()}'

    rst = '\n\n'.join(
        format_release(name, sections)
        for name, sections in release_msgs.items())

    return rst
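
To make `parse_msg` concrete, the same logic run standalone on a made-up
commit message:

import re

MARKERS = ['FEATURE', 'FIX', 'BREAKING CHANGE']

msg = 'FEATURE\n\nAdd frobnication support'
selected = tuple(sorted({marker for marker in MARKERS if marker in msg}))
for marker in selected:
    # Strip the marker lines from the message body
    msg = re.sub(rf'^\s*{re.escape(marker)}\s*$', '', msg, flags=re.MULTILINE)

print(selected)      # ('FEATURE',)
print(msg.strip())   # Add frobnication support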
Example #7
def get_deprecated_table():
    """
    Get a reStructuredText tables with titles for all the deprecated names in
    :mod:`lisa`.
    """
    def indent(string, level=1):
        idt = ' ' * 4
        return string.replace('\n', '\n' + idt * level)

    def make_entry(entry):
        msg = entry.get('msg') or ''
        removed_in = entry.get('removed_in')
        if removed_in is None:
            removed_in = ''
        else:
            removed_in = f'*Removed in: {format_version(removed_in)}*\n\n'

        name = get_sphinx_name(entry['obj'], style='rst')
        replaced_by = entry.get('replaced_by')
        if replaced_by is None:
            replaced_by = ''
        else:
            replaced_by = f"*Replaced by:* {get_sphinx_name(replaced_by, style='rst')}\n\n"

        return "* - {name}{msg}{replaced_by}{removed_in}".format(
            name=indent(name + '\n\n'),
            msg=indent(msg + '\n\n' if msg else ''),
            replaced_by=indent(replaced_by),
            removed_in=indent(removed_in),
        )

    def make_table(entries, removed_in):
        if entries:
            entries = '\n'.join(
                make_entry(entry)
                for entry in sorted(entries, key=itemgetter('name')))
            if removed_in:
                if removed_in > lisa.version.version_tuple:
                    remove = 'to be removed'
                else:
                    remove = 'removed'
                removed_in = f' {remove} in {format_version(removed_in)}'
            else:
                removed_in = ''

            table = ".. list-table:: Deprecated names{removed_in}\n    :align: left{entries}".format(
                entries=indent('\n\n' + entries),
                removed_in=removed_in,
            )
            header = f'Deprecated names{removed_in}'
            header += '\n' + '+' * len(header)

            return header + '\n\n' + table
        else:
            return ''

    entries = [{
        'name': name,
        **info
    } for name, info in get_deprecated_map().items()]

    unspecified_removal = [
        entry for entry in entries if not entry.get('removed_in')
    ]

    other_entries = [
        entry for entry in entries if entry.get('removed_in')
    ]

    tables = []
    tables.append(make_table(unspecified_removal, removed_in=None))
    tables.extend(
        # Note: groupby here must be a sort-then-group helper, since
        # itertools.groupby() does not take a "reverse" parameter.
        make_table(entries, removed_in=removed_in) for removed_in, entries in
        groupby(other_entries, itemgetter('removed_in'), reverse=True))

    return '\n\n'.join(tables)
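
For illustration, grouping hypothetical deprecation entries per removal
version, newest first (spelled out with plain `itertools.groupby`, hence the
explicit reverse sort):

import itertools
from operator import itemgetter

entries = [
    {'name': 'a', 'removed_in': (3, 0)},
    {'name': 'b', 'removed_in': (2, 0)},
    {'name': 'c', 'removed_in': (3, 0)},
]
for removed_in, group in itertools.groupby(
        sorted(entries, key=itemgetter('removed_in'), reverse=True),
        key=itemgetter('removed_in')):
    print(removed_in, [entry['name'] for entry in group])
# (3, 0) ['a', 'c']
# (2, 0) ['b']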
Example #8
    @classmethod
    def _calibrate(cls, target, res_dir):
        res_dir = res_dir if res_dir else target.get_res_dir(
            "rta_calib", symlink=False
        )

        pload_regexp = re.compile(r'pLoad = ([0-9]+)ns')
        pload = {}

        logger = cls.get_logger()

        # Create calibration task
        if target.is_rooted:
            max_rtprio = int(target.execute('ulimit -Hr').split('\r')[0])
            logger.debug('Max RT prio: {}'.format(max_rtprio))

            priority = max_rtprio if max_rtprio <= 10 else 10
            sched_policy = 'FIFO'
        else:
            logger.warning('Will use default scheduler class instead of RT since the target is not rooted')
            priority = None
            sched_policy = None

        for cpu in target.list_online_cpus():
            logger.info('CPU{} calibration...'.format(cpu))

            # RT-app will run a calibration for us, so we just need to
            # run a dummy task and read the output
            calib_task = Periodic(
                duty_cycle_pct=100,
                duration_s=0.001,
                period_ms=1,
                priority=priority,
                sched_policy=sched_policy,
            )
            rta = cls.by_profile(target, name="rta_calib_cpu{}".format(cpu),
                                 profile={'task1': calib_task},
                                 calibration="CPU{}".format(cpu),
                                 res_dir=res_dir)

            with rta, target.freeze_userspace():
                # Disable CPU capacities update, since that leads to infinite
                # recursion
                rta.run(as_root=target.is_rooted, update_cpu_capacities=False)

            for line in rta.output.split('\n'):
                pload_match = re.search(pload_regexp, line)
                if pload_match is None:
                    continue
                pload[cpu] = int(pload_match.group(1))
                logger.debug('>>> CPU{}: {}'.format(cpu, pload[cpu]))

        # Avoid circular import issue
        from lisa.platforms.platinfo import PlatformInfo
        snippet_plat_info = PlatformInfo({
            'rtapp': {
                'calib': pload,
            },
        })
        logger.info('Platform info rt-app calibration configuration:\n{}'.format(
            snippet_plat_info.to_yaml_map_str()
        ))

        plat_info = target.plat_info

        # Sanity check calibration values for asymmetric systems if we have
        # access to capacities
        try:
            cpu_capacities = plat_info['cpu-capacities']
        except KeyError:
            return pload

        capa_ploads = {
            capacity: {cpu: pload[cpu] for cpu, capa in cpu_caps}
            for capacity, cpu_caps in groupby(cpu_capacities.items(), itemgetter(1))
        }

        # Find the min pload per capacity level, i.e. the fastest detected CPU.
        # It is more likely to represent the right pload, as it has suffered
        # from less IRQ slowdown or similar disturbances that might be random.
        capa_pload = {
            capacity: min(ploads.values())
            for capacity, ploads in capa_ploads.items()
        }

        # Sort by capacity
        capa_pload_list = sorted(capa_pload.items())
        # unzip the list of tuples
        _, pload_list = zip(*capa_pload_list)

        # If sorting according to capa was not equivalent to reverse sorting
        # according to pload (small pload=fast cpu)
        if list(pload_list) != sorted(pload_list, reverse=True):
            raise CalibrationError('Calibration values report big cores as less capable than LITTLE cores')

        # Check that the CPU capacities seen by rt-app are similar to the ones
        # the kernel uses
        true_capacities = cls.get_cpu_capacities_from_calibrations(pload)
        capa_factors_pct = {
            cpu: true_capacities[cpu] / cpu_capacities[cpu] * 100
            for cpu in cpu_capacities.keys()
        }
        dispersion_pct = max(abs(100 - factor) for factor in capa_factors_pct.values())

        logger.info('CPU capacities according to rt-app workload: {}'.format(true_capacities))

        if dispersion_pct > 2:
            logger.warning('The calibration values are not inversely proportional to the CPU capacities, the duty cycles will be up to {:.2f}% off on some CPUs: {}'.format(dispersion_pct, capa_factors_pct))

        if dispersion_pct > 20:
            logger.warning('The calibration values are not inversely proportional to the CPU capacities. Either rt-app calibration failed, or the rt-app busy loop has a very different instruction mix compared to the workload used to establish the CPU capacities: {}'.format(capa_factors_pct))

        # Map each CPU X to the list of CPUs that are faster than X, even
        # though they have a smaller capacity than X
        if len(capa_ploads) > 1:
            faster_than_map = {
                cpu1: sorted(
                    cpu2
                    for cpu2, pload2 in ploads2.items()
                    # CPU2 faster than CPU1
                    if pload2 < pload1
                )
                for (capa1, ploads1), (capa2, ploads2) in itertools.permutations(capa_ploads.items(), 2)
                for cpu1, pload1 in ploads1.items()
                # Only look at permutations in which CPUs of ploads1 are supposed
                # to be faster than the one in ploads2
                if capa1 > capa2
            }
        else:
            faster_than_map = {}

        # Remove empty lists
        faster_than_map = {
            cpu: faster_cpus
            for cpu, faster_cpus in faster_than_map.items()
            if faster_cpus
        }

        if faster_than_map:
            raise CalibrationError('Some CPUs of higher capacities are slower than other CPUs of smaller capacities: {}'.format(faster_than_map))

        return pload
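
The inverse-proportionality sanity check boils down to: sorting by capacity
must reverse-sort the ploads, since a higher-capacity CPU should need fewer
nanoseconds per unit of work. A toy sketch with made-up numbers:

capa_pload = {
    512: 200,   # hypothetical LITTLE cluster: capacity 512, pload 200ns
    1024: 100,  # hypothetical big cluster: capacity 1024, pload 100ns
}
# Sort by capacity, then unzip the (capacity, pload) pairs
_, pload_list = zip(*sorted(capa_pload.items()))
# Small pload = fast CPU, so ploads must be in decreasing order
assert list(pload_list) == sorted(pload_list, reverse=True)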
Example #9
    SignalDesc('sched_wakeup_new', ['target_cpu']),
    SignalDesc('sched_wakeup_new', ['comm', 'pid']),
    SignalDesc('cpu_idle', ['cpu_id']),
    SignalDesc('cpu_frequency', ['cpu']),
    SignalDesc('sched_compute_energy', ['comm', 'pid']),
    SignalDesc('sched_load_se', ['comm', 'pid']),
    SignalDesc('sched_util_est_task', ['comm', 'pid']),
    SignalDesc('sched_util_est_cpu', ['cpu']),
    SignalDesc('sched_load_cfs_rq', ['path', 'cpu']),
    SignalDesc('sched_pelt_irq', ['cpu']),
    SignalDesc('sched_pelt_rt', ['cpu']),
    SignalDesc('sched_pelt_dl', ['cpu']),
    SignalDesc('uclamp_util_se', ['pid', 'comm']),
    SignalDesc('uclamp_util_cfs', ['cpu']),
    SignalDesc('sched_overutilized', []),
    SignalDesc('sched_process_wait', ['comm', 'pid']),
    SignalDesc('schedutil_em_boost', ['cpu']),
    SignalDesc('thermal_temperature', ['id']),
    SignalDesc('thermal_zone_trip', ['id']),
]
"""
List of predefined :class:`SignalDesc`.
"""

SignalDesc._SIGNALS_MAP = {
    event: list(signal_descs)
    for event, signal_descs in groupby(_SIGNALS, key=attrgetter('event'))
}
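
A self-contained sketch of that map construction, with a namedtuple standing
in for `SignalDesc` (with plain `itertools.groupby`, descriptors sharing an
event must be adjacent in the list, as they are above):

from collections import namedtuple
from itertools import groupby
from operator import attrgetter

Desc = namedtuple('Desc', ['event', 'fields'])

signals = [
    Desc('cpu_idle', ['cpu_id']),
    Desc('cpu_idle', ['state']),  # hypothetical second descriptor
]
signals_map = {
    event: list(descs)
    for event, descs in groupby(signals, key=attrgetter('event'))
}
assert len(signals_map['cpu_idle']) == 2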

# vim: set tabstop=4 shiftwidth=4 textwidth=80 expandtab :