Example #1
def bitmask(value):
    # Normalise a range string (e.g. '0-3'), an iterable of bit indices, or a
    # plain integer into an integer bitmask. ranges_to_list, isiterable and
    # list_to_mask are helpers from the project's utility module.
    if isinstance(value, basestring):
        value = ranges_to_list(value)
    if isiterable(value):
        value = list_to_mask(value)
    if not isinstance(value, int):
        raise ValueError(value)
    return value
Example #2
    def from_str(cls, string):
        """
        Accepts the following inputs:

            * ``0``: a single integer
            * ``4-0``: an inclusive range of integers
            * ``1,2,10,55-99``: a comma separated list of the previous formats
        """
        return ranges_to_list(string)
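The formats described above lend themselves to a very small parser. Below is a minimal, self-contained sketch of what a ``ranges_to_list``-style helper could look like; the real helper lives in the project's utility module and may behave differently (for instance, the handling of descending ranges such as ``4-0`` is assumed here):

def ranges_to_list_sketch(value):
    """Parse strings such as '0', '4-0' or '1,2,10,55-99' into a list of ints."""
    result = []
    for chunk in str(value).split(','):
        if '-' in chunk:
            first, last = (int(x) for x in chunk.split('-'))
            lo, hi = min(first, last), max(first, last)  # accept '4-0' as well as '0-4'
            result.extend(range(lo, hi + 1))
        else:
            result.append(int(chunk))
    return result

# ranges_to_list_sketch('1,2,10,55-99') -> [1, 2, 10, 55, 56, ..., 99]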
Example #3
    def list_online_cpus(self, core=None):
        path = self.path.join('/sys/devices/system/cpu/online')
        output = self.read_value(path)
        all_online = ranges_to_list(output)
        if core:
            cpus = self.core_cpus(core)
            if not cpus:
                raise ValueError(core)
            return [o for o in all_online if o in cpus]
        else:
            return all_online
Example #4
    def _confProfile(self):

        # Task configuration
        target_cpu = self.getTargetCpu(self.loadref)
        self.rta_profile = {
            'tasks': {},
            'global': {}
        }

        # Initialize global configuration
        global_conf = {
                'default_policy': 'SCHED_OTHER',
                'duration': -1,
                'calibration': 'CPU'+str(target_cpu),
                'logdir': self.run_dir,
            }

        # Setup calibration data
        calibration = self.getCalibrationConf(target_cpu)
        global_conf['calibration'] = calibration
        if self.duration is not None:
            global_conf['duration'] = self.duration
            self.logger.warn('%14s - Limiting workload duration to %d [s]',
                             'RTApp', global_conf['duration'])
        else:
            self.logger.info('%14s - Workload duration defined by longest task',
                             'RTApp')

        # Setup default scheduling class
        if 'policy' in self.sched:
            policy = self.sched['policy'].upper()
            if policy not in ['OTHER', 'FIFO', 'RR', 'DEADLINE']:
                raise ValueError('scheduling class {} not supported'\
                        .format(policy))
            global_conf['default_policy'] = 'SCHED_' + self.sched['policy']

        self.logger.info('%14s - Default policy: %s',
                         'RTApp', global_conf['default_policy'])

        # Setup global configuration
        self.rta_profile['global'] = global_conf

        # Setup tasks parameters
        for tid in sorted(self.params['profile'].keys()):
            task = self.params['profile'][tid]

            # Initialize task configuration
            task_conf = {}

            if 'sched' not in task:
                policy = 'DEFAULT'
            else:
                policy = task['sched']['policy'].upper()
            if policy == 'DEFAULT':
                task_conf['policy'] = global_conf['default_policy']
                sched_descr = 'sched: using default policy'
            elif policy not in ['OTHER', 'FIFO', 'RR', 'DEADLINE']:
                raise ValueError('scheduling class {} not supported'\
                        .format(policy))
            else:
                task_conf.update(task['sched'])
                task_conf['policy'] = 'SCHED_' + policy
                sched_descr = 'sched: {0:s}'.format(task['sched'])

            # Initialize task phases
            task_conf['phases'] = {}

            self.logger.info('%14s - ------------------------', 'RTApp')
            self.logger.info('%14s - task [%s], %s', 'RTApp', tid, sched_descr)

            if 'delay' in task.keys():
                if task['delay'] > 0:
                    task['delay'] = int(task['delay'] * 1e6)
                    task_conf['phases']['p000000'] = {}
                    task_conf['phases']['p000000']['delay'] = task['delay']
                    self.logger.info('%14s -  | start delay: %.6f [s]',
                            'RTApp', task['delay'] / 1e6)

            self.logger.info('%14s -  | calibration CPU: %d',
                             'RTApp', target_cpu)

            if 'loops' not in task.keys():
                task['loops'] = 1
            task_conf['loop'] = task['loops']
            self.logger.info('%14s -  | loops count: %d',
                             'RTApp', task['loops'])

            # Setup task affinity
            if 'cpus' in task and task['cpus']:
                task_conf['cpus'] = ranges_to_list(task['cpus'])
                self.logger.info('%14s -  | CPUs affinity: %s',
                                 'RTApp', task['cpus'])

            # Setup task configuration
            self.rta_profile['tasks'][tid] = task_conf

            # Build the phase descriptors for this task
            pid = 1
            for phase in task['phases']:

                # Convert time parameters to integer [us] units
                duration = int(phase.duration_s * 1e6)
                period = int(phase.period_ms * 1e3)

                # A duty-cycle of 0[%] translates into a 'sleep' phase
                if phase.duty_cycle_pct == 0:

                    self.logger.info('%14s -  + phase_%06d: sleep %.6f [s]',
                                     'RTApp', pid, duration/1e6)

                    task_phase = {
                        'loop': 1,
                        'sleep': duration,
                    }

                # A duty-cycle of 100[%] translates into a 'run-only' phase
                elif phase.duty_cycle_pct == 100:

                    self.logger.info('%14s -  + phase_%06d: batch %.6f [s]',
                                     'RTApp', pid, duration/1e6)

                    task_phase = {
                        'loop': 1,
                        'run': duration,
                    }

                # A certain number of loops is required to generate the
                # proper load
                else:

                    cloops = -1
                    if duration >= 0:
                        cloops = int(duration / period)

                    sleep_time = period * (100 - phase.duty_cycle_pct) / 100
                    running_time = period - sleep_time
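                    # Illustrative numbers only: with period = 100000 [us]
                    # and duty_cycle = 25 %, sleep_time = 75000 [us] and
                    # running_time = 25000 [us]; a duration of 1000000 [us]
                    # then gives cloops = 10 timer-paced loops.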

                    self.logger.info(
                            '%14s - + phase_%06d: duration %.6f [s] (%d loops)',
                            'RTApp', pid, duration/1e6, cloops)
                    self.logger.info(
                            '%14s - |  period   %6d [us], duty_cycle %3d %%',
                            'RTApp', period, phase.duty_cycle_pct)
                    self.logger.info(
                            '%14s - |  run_time %6d [us], sleep_time %6d [us]',
                            'RTApp', running_time, sleep_time)

                    task_phase = {
                        'loop': cloops,
                        'run': running_time,
                        'timer': {'ref': tid, 'period': period},
                    }

                self.rta_profile['tasks'][tid]['phases']\
                    ['p'+str(pid).zfill(6)] = task_phase

                pid += 1

            # Register this task in the workload's task list
            self.tasks[tid] = {'pid': -1}

        # Generate the JSON configuration in a local file
        self.json = '{0:s}_{1:02d}.json'.format(self.name, self.exc_id)
        with open(self.json, 'w') as outfile:
            json.dump(self.rta_profile, outfile,
                    sort_keys=True, indent=4, separators=(',', ': '))

        return self.json
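For reference, the rta_profile dictionary serialised above has one 'global' section plus one entry per task, each carrying its phase descriptors. A hypothetical profile with a single 25 % duty-cycle task could end up looking roughly like this (all names and values are illustrative, not taken from a real run):

rta_profile_sketch = {
    'global': {
        'default_policy': 'SCHED_OTHER',
        'duration': -1,                     # -1: run until the longest task finishes
        'calibration': 'CPU0',
        'logdir': '/tmp/rtapp',
    },
    'tasks': {
        'task_p25': {
            'policy': 'SCHED_OTHER',
            'loop': 1,
            'phases': {
                'p000001': {
                    'loop': 10,
                    'run': 25000,           # run time per period, in [us]
                    'timer': {'ref': 'task_p25', 'period': 100000},
                },
            },
        },
    },
}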
Example #5
    def from_simplifiedEM_target(cls, target,
            directory='/sys/devices/system/cpu/energy_model'):
        """
        Create an EnergyModel by reading a target filesystem on a device with
        the new Simplified Energy Model present.

        This uses the energy_model sysfs node added by EAS patches to expose
        the frequency domains, together with a tuple of capacity, frequency
        and active power for each CPU. This feature is not upstream in mainline
        Linux (as of v4.17), and only exists in Android kernels later than
        android-4.14.

        With respect to idle states: the EnergyModel constructed won't be
        aware of any power data or topological dependencies for entering
        "cluster" idle states, since the simplified model has no such concept.

        Only Active States for CPUs are initialised; all other levels are
        left empty.

        :param target: Devlib target object to read filesystem from. Must have
                       cpufreq and cpuidle modules enabled.
        :returns: Constructed EnergyModel object based on the parameters
                  reported by the target.
        """
        if 'cpuidle' not in target.modules:
            raise TargetError('Requires cpuidle devlib module. Please ensure '
                               '"cpuidle" is listed in your target/test modules')

        # Simplified EM on-disk format (for each frequency domain):
        #    /sys/devices/system/cpu/energy_model/<frequency_domain>/..
        #        ../capacity
        #           contains a space-separated list of capacities in increasing order
        #        ../cpus
        #           cpulist-formatted representation of the cpus in the frequency domain
        #        ../frequency
        #           space-separated list of frequencies in corresponding order to capacities
        #        ../power
        #           space-separated list of power consumption in corresponding order to capacities
        # taken together, the contents of capacity, frequency and power give you the required
        # tuple for ActiveStates.
        # hence, domain should be supplied as a glob, and fields should be
        #   capacity, cpus, frequency, power
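        # For illustration only (hypothetical contents for one domain):
        #        ../cpus      -> "0-1"
        #        ../capacity  -> "256 512 1024"
        #        ../frequency -> "500000 1000000 2000000"
        #        ../power     -> "50 120 400"
        # i.e. one (capacity, power) pair per listed frequency.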

        sysfs_em = target.read_tree_values(directory, depth=3)

        if not sysfs_em:
            raise TargetError('Simplified Energy Model not exposed '
                              'at {} in sysfs.'.format(directory))

        cpu_to_fdom = {}
        for fd, fields in sysfs_em.iteritems():
            cpus = ranges_to_list(fields["cpus"])
            for cpu in cpus:
                cpu_to_fdom[cpu] = fd
            sysfs_em[fd]['cpus'] = cpus
            sysfs_em[fd]['frequency'] = map(int, sysfs_em[fd]['frequency'].split(' '))
            sysfs_em[fd]['power'] = map(int, sysfs_em[fd]['power'].split(' '))

            # Compute the capacity of the CPUs at each OPP with a linear
            # mapping to the frequencies
            sysfs = '/sys/devices/system/cpu/cpu{}/cpu_capacity'
            cap = target.read_value(sysfs.format(cpus[0]), int)
            max_freq = max(sysfs_em[fd]['frequency'])
            caps = [f * cap / max_freq for f in sysfs_em[fd]['frequency']]
            sysfs_em[fd]['capacity'] = caps

        def read_active_states(cpu):
            fd = sysfs_em[cpu_to_fdom[cpu]]
            cstates = zip(fd['capacity'], fd['power'])
            active_states = [ActiveState(c, p) for c, p in cstates]
            return OrderedDict(zip(fd['frequency'], active_states))

        def read_idle_states(cpu):
            # idle states are not supported in the new model
            # record 0 power for them all, but name them according to target
            names = [s.name for s in target.cpuidle.get_states(cpu)]
            return OrderedDict((name, 0) for name in names)

        # Read the CPU-level data
        cpus = range(target.number_of_cpus)
        cpu_nodes = []
        for cpu in cpus:
            node = EnergyModelNode(
                cpu=cpu,
                active_states=read_active_states(cpu),
                idle_states=read_idle_states(cpu))
            cpu_nodes.append(node)

        root = EnergyModelRoot(children=cpu_nodes)
        freq_domains = [sysfs_em[fdom]['cpus'] for fdom in sysfs_em]

        # We don't have a way to read the idle power domains from sysfs (the kernel
        # isn't even aware of them) so we'll just have to assume each CPU is its
        # own power domain and all idle states are independent of each other.
        cpu_pds = []
        for cpu in cpus:
            names = [s.name for s in target.cpuidle.get_states(cpu)]
            cpu_pds.append(PowerDomain(cpu=cpu, idle_states=names))

        root_pd = PowerDomain(children=cpu_pds, idle_states=[])
        return cls(root_node=root,
                   root_power_domain=root_pd,
                   freq_domains=freq_domains)
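A minimal usage sketch, assuming a connected devlib target with the cpufreq and cpuidle modules enabled; the connection details below are placeholders and the EnergyModel import path depends on the project layout:

from devlib import LinuxTarget

# Placeholder connection settings; adapt to your board.
target = LinuxTarget(connection_settings={'host': '192.168.0.2',
                                          'username': 'root'},
                     modules=['cpufreq', 'cpuidle'])
em = EnergyModel.from_simplifiedEM_target(target)  # EnergyModel as defined above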
Example #6
    def _confProfile(self):

        # Sanity check for task names
        for task in self.params['profile'].keys():
            if len(task) > 15:
                # rt-app uses pthread_setname_np(3) which limits the task name
                # to 16 characters including the terminal '\0'.
                msg = ('Task name "{}" too long, please configure your tasks '
                       'with names shorter than 16 characters').format(task)
                raise ValueError(msg)

        # Task configuration
        self.rta_profile = {'tasks': {}, 'global': {}}

        # Initialize global configuration
        global_conf = {
            'default_policy': 'SCHED_OTHER',
            'duration': -1,
            'calibration': self.getCalibrationConf(),
            'logdir': self.run_dir,
        }

        if self.duration is not None:
            global_conf['duration'] = self.duration
            self._log.warn('Limiting workload duration to %d [s]',
                           global_conf['duration'])
        else:
            self._log.info('Workload duration defined by longest task')

        # Setup default scheduling class
        if 'policy' in self.sched:
            policy = self.sched['policy'].upper()
            if policy not in ['OTHER', 'FIFO', 'RR', 'DEADLINE']:
                raise ValueError('scheduling class {} not supported'\
                        .format(policy))
            global_conf['default_policy'] = 'SCHED_' + self.sched['policy']

        self._log.info('Default policy: %s', global_conf['default_policy'])

        # Setup global configuration
        self.rta_profile['global'] = global_conf

        # Setup tasks parameters
        for tid in sorted(self.params['profile'].keys()):
            task = self.params['profile'][tid]

            # Initialize task configuration
            task_conf = {}

            if 'sched' not in task:
                policy = 'DEFAULT'
            else:
                policy = task['sched']['policy'].upper()
            if policy == 'DEFAULT':
                task_conf['policy'] = global_conf['default_policy']
                sched_descr = 'sched: using default policy'
            elif policy not in ['OTHER', 'FIFO', 'RR', 'DEADLINE']:
                raise ValueError('scheduling class {} not supported'\
                        .format(policy))
            else:
                task_conf.update(task['sched'])
                task_conf['policy'] = 'SCHED_' + policy
                sched_descr = 'sched: {0:s}'.format(task['sched'])

            # Initialize task phases
            task_conf['phases'] = {}

            self._log.info('------------------------')
            self._log.info('task [%s], %s', tid, sched_descr)

            if 'delay' in task.keys():
                if task['delay'] > 0:
                    task_conf['delay'] = int(task['delay'] * 1e6)
                    self._log.info(' | start delay: %.6f [s]', task['delay'])

            if 'loops' not in task.keys():
                task['loops'] = 1
            task_conf['loop'] = task['loops']
            self._log.info(' | loops count: %d', task['loops'])

            # Setup task affinity
            if 'cpus' in task and task['cpus']:
                self._log.info(' | CPUs affinity: %s', task['cpus'])
                if isinstance(task['cpus'], str):
                    task_conf['cpus'] = ranges_to_list(task['cpus'])
                elif isinstance(task['cpus'], list):
                    task_conf['cpus'] = task['cpus']
                else:
                    raise ValueError('cpus must be a list or string')

            # Setup task configuration
            self.rta_profile['tasks'][tid] = task_conf

            # Build the phase descriptors for this task
            pid = 1
            for phase in task['phases']:

                # Convert time parameters to integer [us] units
                duration = int(phase.duration_s * 1e6)
                period = int(phase.period_ms * 1e3)

                # A duty-cycle of 0[%] translates into a 'sleep' phase
                if phase.duty_cycle_pct == 0:

                    self._log.info(' + phase_%06d: sleep %.6f [s]', pid,
                                   duration / 1e6)

                    task_phase = {
                        'loop': 1,
                        'sleep': duration,
                    }

                # A duty-cycle of 100[%] translates into a 'run-only' phase
                elif phase.duty_cycle_pct == 100:

                    self._log.info(' + phase_%06d: batch %.6f [s]', pid,
                                   duration / 1e6)

                    task_phase = {
                        'loop': 1,
                        'run': duration,
                    }

                # A certain number of loops is required to generate the
                # proper load
                else:

                    cloops = -1
                    if duration >= 0:
                        cloops = int(duration / period)

                    sleep_time = period * (100 - phase.duty_cycle_pct) / 100
                    running_time = period - sleep_time

                    self._log.info(
                        '+ phase_%06d: duration %.6f [s] (%d loops)', pid,
                        duration / 1e6, cloops)
                    self._log.info('|  period   %6d [us], duty_cycle %3d %%',
                                   period, phase.duty_cycle_pct)
                    self._log.info('|  run_time %6d [us], sleep_time %6d [us]',
                                   running_time, sleep_time)

                    task_phase = {
                        'loop': cloops,
                        'run': running_time,
                        'timer': {
                            'ref': tid,
                            'period': period
                        },
                    }

                self.rta_profile['tasks'][tid]['phases']\
                    ['p'+str(pid).zfill(6)] = task_phase

                pid += 1

            # Register this task in the workload's task list
            self.tasks[tid] = {'pid': -1}

        # Generate the JSON configuration in a local file
        self.json = '{0:s}_{1:02d}.json'.format(self.name, self.exc_id)
        with open(self.json, 'w') as outfile:
            json.dump(self.rta_profile,
                      outfile,
                      sort_keys=True,
                      indent=4,
                      separators=(',', ': '))

        return self.json
Example #7
File: rta.py Project: bjackman/lisa
    def _confProfile(self):

        # Sanity check for task names
        for task in self.params['profile'].keys():
            if len(task) > 15:
                # rt-app uses pthread_setname_np(3) which limits the task name
                # to 16 characters including the terminal '\0'.
                msg = ('Task name "{}" too long, please configure your tasks '
                       'with names shorter than 16 characters').format(task)
                raise ValueError(msg)

        # Task configuration
        self.rta_profile = {
            'tasks': {},
            'global': {}
        }

        # Initialize global configuration
        global_conf = {
                'default_policy': 'SCHED_OTHER',
                'duration': -1,
                'calibration': self.getCalibrationConf(),
                'logdir': self.run_dir,
            }

        if self.duration is not None:
            global_conf['duration'] = self.duration
            self._log.warn('Limiting workload duration to %d [s]',
                           global_conf['duration'])
        else:
            self._log.info('Workload duration defined by longest task')

        # Setup default scheduling class
        if 'policy' in self.sched:
            policy = self.sched['policy'].upper()
            if policy not in ['OTHER', 'FIFO', 'RR', 'DEADLINE']:
                raise ValueError('scheduling class {} not supported'\
                        .format(policy))
            global_conf['default_policy'] = 'SCHED_' + self.sched['policy']

        self._log.info('Default policy: %s', global_conf['default_policy'])

        # Setup global configuration
        self.rta_profile['global'] = global_conf

        # Setup tasks parameters
        for tid in sorted(self.params['profile'].keys()):
            task = self.params['profile'][tid]

            # Initialize task configuration
            task_conf = {}

            if 'sched' not in task:
                policy = 'DEFAULT'
            else:
                policy = task['sched']['policy'].upper()
            if policy == 'DEFAULT':
                task_conf['policy'] = global_conf['default_policy']
                sched_descr = 'sched: using default policy'
            elif policy not in ['OTHER', 'FIFO', 'RR', 'DEADLINE']:
                raise ValueError('scheduling class {} not supported'\
                        .format(policy))
            else:
                task_conf.update(task['sched'])
                task_conf['policy'] = 'SCHED_' + policy
                sched_descr = 'sched: {0:s}'.format(task['sched'])

            # Initialize task phases
            task_conf['phases'] = {}

            self._log.info('------------------------')
            self._log.info('task [%s], %s', tid, sched_descr)

            if 'delay' in task.keys():
                if task['delay'] > 0:
                    task_conf['delay'] = int(task['delay'] * 1e6)
                    self._log.info(' | start delay: %.6f [s]',
                            task['delay'])

            if 'loops' not in task.keys():
                task['loops'] = 1
            task_conf['loop'] = task['loops']
            self._log.info(' | loops count: %d', task['loops'])

            # Setup task affinity
            if 'cpus' in task and task['cpus']:
                self._log.info(' | CPUs affinity: %s', task['cpus'])
                if isinstance(task['cpus'], str):
                    task_conf['cpus'] = ranges_to_list(task['cpus'])
                elif isinstance(task['cpus'], list):
                    task_conf['cpus'] = task['cpus']
                else:
                    raise ValueError('cpus must be a list or string')


            # Setup task configuration
            self.rta_profile['tasks'][tid] = task_conf

            # Build the phase descriptors for this task
            pid = 1
            for phase in task['phases']:

                # Convert time parameters to integer [us] units
                duration = int(phase.duration_s * 1e6)
                period = int(phase.period_ms * 1e3)

                # A duty-cycle of 0[%] translates into a 'sleep' phase
                if phase.duty_cycle_pct == 0:

                    self._log.info(' + phase_%06d: sleep %.6f [s]',
                                   pid, duration/1e6)

                    task_phase = {
                        'loop': 1,
                        'sleep': duration,
                    }

                # A duty-cycle of 100[%] translates into a 'run-only' phase
                elif phase.duty_cycle_pct == 100:

                    self._log.info(' + phase_%06d: batch %.6f [s]',
                                   pid, duration/1e6)

                    task_phase = {
                        'loop': 1,
                        'run': duration,
                    }

                # A certain number of loops is required to generate the
                # proper load
                else:

                    cloops = -1
                    if duration >= 0:
                        cloops = int(duration / period)

                    sleep_time = period * (100 - phase.duty_cycle_pct) / 100
                    running_time = period - sleep_time

                    self._log.info('+ phase_%06d: duration %.6f [s] (%d loops)',
                                   pid, duration/1e6, cloops)
                    self._log.info('|  period   %6d [us], duty_cycle %3d %%',
                                   period, phase.duty_cycle_pct)
                    self._log.info('|  run_time %6d [us], sleep_time %6d [us]',
                                   running_time, sleep_time)

                    task_phase = {
                        'loop': cloops,
                        'run': running_time,
                        'timer': {'ref': tid, 'period': period},
                    }

                self.rta_profile['tasks'][tid]['phases']\
                    ['p'+str(pid).zfill(6)] = task_phase

                pid += 1

            # Register this task in the workload's task list
            self.tasks[tid] = {'pid': -1}

        # Generate the JSON configuration in a local file
        self.json = '{0:s}_{1:02d}.json'.format(self.name, self.exc_id)
        with open(self.json, 'w') as outfile:
            json.dump(self.rta_profile, outfile,
                    sort_keys=True, indent=4, separators=(',', ': '))

        return self.json