Example #1
    def check_from_target(cls, target):
        for domain in target.cpufreq.iter_domains():
            if "schedutil" not in target.cpufreq.list_governors(domain[0]):
                raise CannotCreateError(
                    "Can't set schedutil governor for domain {}".format(domain))

        if 'nrg-model' not in target.plat_info:
            raise CannotCreateError("Energy model not available")
Example #2
    def check_from_target(cls, target):
        super().check_from_target(target)

        try:
            target.plat_info["cpu-capacities"]
        except KeyError as e:
            raise CannotCreateError(str(e))
Example #3
    def check_from_target(cls, target):
        super().check_from_target(target)

        cpus = cls.get_migration_cpus(target.plat_info)
        if len(cpus) != cls.NR_REQUIRED_CPUS:
            raise CannotCreateError(
                "This workload requires {} CPUs of identical capacity".format(
                    cls.NR_REQUIRED_CPUS))
Example #4
    def check_from_target(cls, target):
        super().check_from_target(target)

        try:
            target.plat_info["cpu-capacities"]['rtapp']
        except KeyError as e:
            raise CannotCreateError(str(e))

        # Check that there are enough CPUs of the same capacity
        cls.get_migration_cpus(target.plat_info)
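
Examples #2 to #4 all follow the same pattern: each subclass calls super().check_from_target() first and then adds its own precondition, raising CannotCreateError as soon as one fails. Below is a minimal, self-contained sketch of that chaining; the class names and the dictionary-based target are made up for illustration and are not the real lisa API.

    class CannotCreateError(Exception):
        pass

    class BaseBundle:
        @classmethod
        def check_from_target(cls, target):
            # Hypothetical base precondition.
            if not target.get("has-energy-model"):
                raise CannotCreateError("Energy model not available")

    class MigrationBundle(BaseBundle):
        @classmethod
        def check_from_target(cls, target):
            # Run the parent's checks first, then add this class' own.
            super().check_from_target(target)
            if target.get("nr-big-cpus", 0) < 2:
                raise CannotCreateError("Not enough big CPUs")

    # Passes: both preconditions are satisfied.
    MigrationBundle.check_from_target({"has-energy-model": True, "nr-big-cpus": 4})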
Example #5
File: load_tracking.py Project: wisen/lisa
    def get_migration_cpus(cls, plat_info):
        """
        :returns: N CPUs of same capacity, with N set by :meth:`get_nr_required_cpu`.
        """
        # Iterate over descending CPU capacity groups
        nr_required_cpu = cls.get_nr_required_cpu(plat_info)
        for cpus in reversed(plat_info["capacity-classes"]):
            if len(cpus) >= nr_required_cpu:
                return cpus[:nr_required_cpu]

        raise CannotCreateError(
            "This workload requires {} CPUs of identical capacity".format(
                nr_required_cpu))
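
The selection logic above is easy to exercise in isolation. The sketch below mirrors the loop with a hypothetical plat_info layout, where capacity-classes is assumed to be ordered from smallest to biggest capacity; everything here is illustrative rather than the real lisa objects.

    class CannotCreateError(Exception):
        pass

    def pick_migration_cpus(plat_info, nr_required_cpu):
        # Iterate from the biggest capacity class downwards, as in the
        # snippet above.
        for cpus in reversed(plat_info["capacity-classes"]):
            if len(cpus) >= nr_required_cpu:
                return cpus[:nr_required_cpu]

        raise CannotCreateError(
            "This workload requires {} CPUs of identical capacity".format(
                nr_required_cpu))

    # Hypothetical big.LITTLE topology: CPUs 0-3 are LITTLE, CPUs 4-7 are big.
    plat_info = {"capacity-classes": [[0, 1, 2, 3], [4, 5, 6, 7]]}
    print(pick_migration_cpus(plat_info, 2))   # -> [4, 5]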
Example #6
        def exp_power(row):
            task_utils = row.to_dict()
            try:
                expected_utils = nrg_model.get_optimal_placements(task_utils, capacity_margin_pct)[0]
            except EnergyModelCapacityError:
                raise CannotCreateError(
                    'The workload will result in overutilized status for all possible task placement, making it unsuitable to test EAS on this platform'
                )
            power = nrg_model.estimate_from_cpu_util(expected_utils)
            columns = list(power.keys())

            # Assemble a dataframe to plot the expected utilization
            data.append(expected_utils)
            index.append(row.name)

            return pd.Series([power[c] for c in columns], index=columns)
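
exp_power is a per-row callback that returns a Series, so it is presumably applied with pandas.DataFrame.apply(axis=1) over a dataframe of task utilizations. The toy below shows only that application pattern; the numbers are made up and the callback stands in for the energy model calls.

    import pandas as pd

    df_util = pd.DataFrame({"task0": [100, 200], "task1": [50, 50]})

    def toy_power(row):
        # Stand-in for the energy model lookups: pretend each task's power
        # is simply proportional to its utilization.
        task_utils = row.to_dict()
        return pd.Series({name: util * 0.01 for name, util in task_utils.items()})

    # Returning a Series per row yields a dataframe of per-column values.
    df_power = df_util.apply(toy_power, axis=1)
    print(df_power)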
Example #7
    def get_migration_cpus(cls, plat_info):
        """
        :returns: N CPUs of same capacity, with N set by :meth:`get_nr_required_cpu`.
        """
        # Iterate over descending CPU capacity groups
        nr_required_cpu = cls.get_nr_required_cpu(plat_info)
        cpu_classes = plat_info["capacity-classes"]

        # If the CPU capacities are writeable, it's better to give priority to
        # LITTLE cores as they will be less prone to thermal capping.
        # Otherwise, it's better to pick big cores as they will not be affected
        # by CPU invariance issues.
        if not plat_info['cpu-capacities']['writeable']:
            cpu_classes = reversed(cpu_classes)

        for cpus in cpu_classes:
            if len(cpus) >= nr_required_cpu:
                return cpus[:nr_required_cpu]

        raise CannotCreateError(
            f"This workload requires {nr_required_cpu} CPUs of identical capacity")
Example #8
 def check_from_target(cls, target):
     if target.number_of_nodes < 2:
         raise CannotCreateError(
             "Target doesn't have at least two NUMA nodes")
Example #9
    def get_simulated_pelt(self, task, signal_name):
        """
        Simulate a PELT signal for a given task.

        :param task: task to look for in the trace.
        :type task: int or str or tuple(int, str)

        :param signal_name: Name of the PELT signal to simulate.
        :type signal_name: str

        :return: A :class:`pandas.DataFrame` with a ``simulated`` column
            containing the simulated signal, along with the column of the
            signal as found in the trace.
        """
        logger = self.get_logger()
        trace = self.trace
        task = trace.get_task_id(task)
        cpus = trace.analysis.tasks.cpus_of_tasks([task])

        df_activation = trace.analysis.tasks.df_task_activation(
            task,
            # Util only takes into account times where the task is actually
            # executing
            preempted_value=0,
        )
        df = trace.analysis.load_tracking.df_tasks_signal(signal_name)
        df = df_filter_task_ids(df, [task])
        df = df.copy(deep=False)

        # Ignore the first activation, as its signals are incorrect
        df_activation = df_activation.iloc[2:]

        # Make sure the activation df does not start before the dataframe of
        # signal values, otherwise we cannot provide a sensible init value
        df_activation = df_activation[df.index[0]:]

        # Get the initial signal value matching the first activation we will care about
        init_iloc = df.index.get_loc(df_activation.index[0], method='ffill')
        init = df[signal_name].iloc[init_iloc]

        try:
            # PELT clock in nanoseconds
            clock = df['update_time'] * 1e-9
        except KeyError:
            if any(self.plat_info['cpu-capacities']['rtapp'][cpu] != UTIL_SCALE
                   for phase in self.wlgen_task.phases for cpu in phase.cpus):
                raise CannotCreateError(
                    'PELT time scaling can only be simulated when the PELT clock is available from the trace'
                )

            logger.warning(
                'PELT clock is not available, ftrace timestamp will be used at the expense of accuracy'
            )
            clock = None

        df['simulated'] = simulate_pelt(df_activation['active'],
                                        index=df.index,
                                        init=init,
                                        clock=clock)

        # Since load is now CPU invariant in recent kernel versions, we don't
        # rescale it back. To match the old behavior, that line is
        # needed:
        #  df['simulated'] /= self.plat_info['cpu-capacities']['rtapp'][cpu] / UTIL_SCALE
        kernel_version = self.plat_info['kernel']['version']
        if (signal_name == 'load' and kernel_version.parts[:2] < (5, 1)):
            logger.warning(
                f'Load signal is assumed to be CPU invariant, which is true for recent mainline kernels, but may be wrong for {kernel_version}'
            )

        df['error'] = df[signal_name] - df['simulated']
        df = df.dropna()
        return df
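
The returned dataframe carries the traced signal, the simulated column and an error column, so a natural follow-up is to summarize that error. The snippet below does so on made-up data; only the column names are taken from the method above.

    import pandas as pd

    # Made-up stand-in for the dataframe returned by get_simulated_pelt()
    # with signal_name='util'.
    df = pd.DataFrame({
        "util":      [120, 130, 128, 125],
        "simulated": [118, 133, 127, 126],
    })
    df["error"] = df["util"] - df["simulated"]

    mae = df["error"].abs().mean()
    print(f"mean absolute error: {mae:.2f}")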
Example #10
File: misfit.py Project: ambroise-arm/lisa
 def check_from_target(cls, target):
     if not cls._has_asym_cpucapacity(target):
         raise CannotCreateError(
             "Target doesn't have asymmetric CPU capacities")
Example #11
 def check_from_target(cls, target):
     if len(target.plat_info["capacity-classes"]) < 2:
         raise CannotCreateError(
             'Cannot test migration on single capacity group')
Example #12
File: misfit.py Project: rousya/lisa
 def check_from_target(cls, target):
     if not cls._has_asym_cpucapacity(target):
         raise CannotCreateError(
             "Target doesn't have SD_ASYM_CPUCAPACITY on any sched_domain")