Example 1
class DocumentTest(unittest.TestCase):
    """Tests the `Document` class."""

    def setUp(self):
        self.env = TestEnv()
        self.tracker = self.env.tracker

    def tearDown(self):
        self.env.cleanup()

    def test_constructor(self):
        # create a document called 'test'
        path = os.path.join(self.tracker.paths["docs"], "test")
        with open(path, "w") as fp:
            fp.write("test")

        # try to initialize the doc
        Document(self.tracker, "test")

    def test_read(self):
        """Tests the `read` method"""
        pass

    def test_write(self):
        """Tests the `write` method"""
        pass
Example 2
class CommentTest(unittest.TestCase):
    def setUp(self):
        self.env = TestEnv()
        self.tracker = self.env.tracker
        self.issue = Issue(self.tracker)

    def tearDown(self):
        self.env.cleanup()

    def test_save(self):
        pass

    def test_save_issue(self):
        pass

    def test_delete(self):
        pass

    def test_rm(self):
        pass
Example 3
    def setUpClass(cls):
        cls.params = {}
        cls.env = TestEnv(test_conf=TEST_CONF)
        cls.trace_file = os.path.join(cls.env.res_dir, "offload_idle_pull.dat")
        cls.log_file = os.path.join(cls.env.res_dir, "offload_idle_pull.json")
        cls.early_starters = []
        cls.migrators = []
        cls.num_tasks = len(cls.env.target.bl.bigs)
        cls.populate_tasks()
        local_setup(cls.env)
        cls.run_workload()

        cls.offset = cls.get_offset(cls.early_starters[0])

        cls.m_assert = SchedMultiAssert(
            cls.trace_file,
            cls.env.topology,
            execnames=cls.migrators)

        cls.e_assert = SchedMultiAssert(
            cls.trace_file,
            cls.env.topology,
            execnames=cls.early_starters)
        cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w")
Example 4
 def setUpClass(cls):
     cls.env = TestEnv(test_conf=TEST_CONF)
     cls.target = cls.env.target
Example 5
 def setUp(self):
     self.env = TestEnv()
     self.tracker = self.env.tracker
Example 6
    def __init__(self, target_conf=None, tests_conf=None):
        """
        Tests Executor

        A tests executor is a module which supports the execution of a
        configured set of experiments. Each experiment is composed of:
        - a target configuration
        - a workload to execute

        The executor module can be configured to run a set of workloads
        (wloads) in each different target configuration of a specified set
        (confs). These wloads and confs can be specified by the "tests_conf"
        input dictionary.

        All the results generated by each experiment will be collected in a
        results folder which is named according to this template:
            results/<test_id>/<wltype>:<conf>:<wload>/<run_id>
        where:
        - <test_id> : the "tid" defined by the tests_conf, or a timestamp
                      based folder in case "tid" is not specified
        - <wltype>  : the class of workload executed, e.g. rtapp or sched_perf
        - <conf>    : the identifier of one of the specified configurations
        - <wload>   : the identifier of one of the specified workloads
        - <run_id>  : the progressive execution number from 1 up to the
                      specified iterations
        """

        # Initialize globals
        self._default_cgroup = None
        self._cgroup = None

        # Setup test configuration
        if isinstance(tests_conf, dict):
            logging.info('%14s - Loading custom (inline) test configuration',
                    'Target')
            self._tests_conf = tests_conf
        elif isinstance(tests_conf, str):
            logging.info('%14s - Loading custom (file) test configuration',
                    'Target')
            json_conf = JsonConf(tests_conf)
            self._tests_conf = json_conf.load()
        else:
            raise ValueError('tests_conf must be either a dictionary or a filepath')

        # Check for mandatory configurations
        if 'confs' not in self._tests_conf or not self._tests_conf['confs']:
            raise ValueError(
                    'Configuration error: missing \'confs\' definitions')
        if 'wloads' not in self._tests_conf or not self._tests_conf['wloads']:
            raise ValueError(
                    'Configuration error: missing \'wloads\' definitions')

        # Setup devlib to access the configured target
        self.te = TestEnv(target_conf, tests_conf)
        self.target = self.te.target

        # Compute total number of experiments
        self._exp_count = self._tests_conf['iterations'] \
                * len(self._tests_conf['wloads']) \
                * len(self._tests_conf['confs'])

        self._print_section('Executor', 'Experiments configuration')

        logging.info('%14s - Configured to run:', 'Executor')

        logging.info('%14s -   %3d target configurations:',
                     'Executor', len(self._tests_conf['confs']))
        target_confs = [conf['tag'] for conf in self._tests_conf['confs']]
        target_confs = ', '.join(target_confs)
        logging.info('%14s -       %s', 'Executor', target_confs)

        logging.info('%14s -   %3d workloads (%d iterations each)',
                     'Executor', len(self._tests_conf['wloads']),
                     self._tests_conf['iterations'])
        wload_confs = ', '.join(self._tests_conf['wloads'])
        logging.info('%14s -       %s', 'Executor', wload_confs)

        logging.info('%14s - Total: %d experiments',
                     'Executor', self._exp_count)

        logging.info('%14s - Results will be collected under:', 'Executor')
        logging.info('%14s -       %s', 'Executor', self.te.res_dir)
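
For orientation, a minimal sketch of the tests_conf dictionary this constructor expects. The key names ('iterations', 'confs', 'wloads', 'tag', 'type', 'conf', 'class') are taken from the Executor code shown in these examples; the concrete values and the workload name are illustrative assumptions only.

# Hypothetical tests_conf, assuming the keys read by the Executor above
tests_conf = {
    "iterations": 2,              # runs of each workload per target configuration
    "confs": [                    # target configurations, each identified by a 'tag'
        {"tag": "default"},
    ],
    "wloads": {                   # workload specifications, keyed by an arbitrary name
        "wl_example": {           # hypothetical workload name
            "type": "rt-app",     # or "perf_bench"
            "conf": {
                "class": "periodic",
                "tasks": 1,
                "params": {},     # placeholder: real rt-app task parameters omitted
            },
        },
    },
}
# executor = Executor(target_conf=my_target_conf, tests_conf=tests_conf)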
Example 7
from stable_baselines.common.policies import MlpLnLstmPolicy, MlpPolicy
from stable_baselines.common.vec_env import SubprocVecEnv, DummyVecEnv
from stable_baselines import PPO2

from env import TestEnv

if __name__ == '__main__':
    test_env = SubprocVecEnv([lambda: TestEnv() for _ in range(4)])

    model = PPO2(MlpLnLstmPolicy, test_env, verbose=0, nminibatches=4)
    model.learn(total_timesteps=10000)
Example 8
        # 'cgroups'     # Enable for cgroup support
    ],
    "emeter": {
        'instrument': 'monsoon',
        'conf': {}
    },
    "systrace": {
        'extra_categories': ['binder_driver'],
        "extra_events": ["binder_transaction_alloc_buf"],
    },
    # Tools required by the experiments
    "tools": ['taskset'],
    "skip_nrg_model": True,
}

te = TestEnv(conf, wipe=False)
target = te.target


def run_page_stats(duration, frequency):
    procs = {}
    for i in range(int(duration / frequency)):
        ss = target.execute("cat /d/binder/stats")
        proc_dump = re.split("\nproc ", ss)[1:]

        for proc in proc_dump[1:]:
            lines = proc.split("\n  ")
            proc_id = lines[0]
            page = re.search("pages: (\d+:\d+:\d+)", proc)
            active, lru, free = map(int, page.group(1).split(":"))
            if proc_id not in procs:
Example 9
 def setUp(self):
     self.env = TestEnv()
Example 10
    def __init__(self, target_conf=None, tests_conf=None):
        """
        Tests Executor

        A tests executor is a module which supports the execution of a
        configured set of experiments. Each experiment is composed of:
        - a target configuration
        - a workload to execute

        The executor module can be configured to run a set of workloads
        (wloads) in each different target configuration of a specified set
        (confs). These wloads and confs can be specified by the "tests_conf"
        input dictionary.

        All the results generated by each experiment will be collected in a
        results folder which is named according to this template:
            results/<test_id>/<wltype>:<conf>:<wload>/<run_id>
        where:
        - <test_id> : the "tid" defined by the tests_conf, or a timestamp
                      based folder in case "tid" is not specified
        - <wltype>  : the class of workload executed, e.g. rtapp or sched_perf
        - <conf>    : the identifier of one of the specified configurations
        - <wload>   : the identifier of one of the specified workloads
        - <run_id>  : the progressive execution number from 1 up to the
                      specified iterations
        """

        # Initialize globals
        self._default_cgroup = None
        self._cgroup = None

        # Setup test configuration
        if isinstance(tests_conf, dict):
            logging.info('%14s - Loading custom (inline) test configuration',
                    'Target')
            self._tests_conf = tests_conf
        elif isinstance(tests_conf, str):
            logging.info('%14s - Loading custom (file) test configuration',
                    'Target')
            json_conf = JsonConf(tests_conf)
            self._tests_conf = json_conf.load()
        else:
            raise ValueError('tests_conf must be either a dictionary or a filepath')

        # Check for mandatory configurations
        if 'confs' not in self._tests_conf or not self._tests_conf['confs']:
            raise ValueError(
                    'Configuration error: missing \'confs\' definitions')
        if 'wloads' not in self._tests_conf or not self._tests_conf['wloads']:
            raise ValueError(
                    'Configuration error: missing \'wloads\' definitions')

        # Setup devlib to access the configured target
        self.te = TestEnv(target_conf, tests_conf)
        self.target = self.te.target

        # Compute total number of experiments
        self._exp_count = self._tests_conf['iterations'] \
                * len(self._tests_conf['wloads']) \
                * len(self._tests_conf['confs'])

        self._print_section('Executor', 'Experiments configuration')

        logging.info('%14s - Configured to run:', 'Executor')

        logging.info('%14s -   %3d target configurations:',
                     'Executor', len(self._tests_conf['confs']))
        target_confs = [conf['tag'] for conf in self._tests_conf['confs']]
        target_confs = ', '.join(target_confs)
        logging.info('%14s -       %s', 'Executor', target_confs)

        logging.info('%14s -   %3d workloads (%d iterations each)',
                     'Executor', len(self._tests_conf['wloads']),
                     self._tests_conf['iterations'])
        wload_confs = ', '.join(self._tests_conf['wloads'])
        logging.info('%14s -       %s', 'Executor', wload_confs)

        logging.info('%14s - Total: %d experiments',
                     'Executor', self._exp_count)

        logging.info('%14s - Results will be collected under:', 'Executor')
        logging.info('%14s -       %s', 'Executor', self.te.res_dir)
Example 11
class Executor():

    def __init__(self, target_conf=None, tests_conf=None):
        """
        Tests Executor

        A tests executor is a module which supports the execution of a
        configured set of experiments. Each experiment is composed of:
        - a target configuration
        - a workload to execute

        The executor module can be configured to run a set of workloads
        (wloads) in each different target configuration of a specified set
        (confs). These wloads and confs can be specified by the "tests_conf"
        input dictionary.

        All the results generated by each experiment will be collected in a
        results folder which is named according to this template:
            results/<test_id>/<wltype>:<conf>:<wload>/<run_id>
        where:
        - <test_id> : the "tid" defined by the tests_conf, or a timestamp
                      based folder in case "tid" is not specified
        - <wltype>  : the class of workload executed, e.g. rtapp or sched_perf
        - <conf>    : the identifier of one of the specified configurations
        - <wload>   : the identifier of one of the specified workloads
        - <run_id>  : the progressive execution number from 1 up to the
                      specified iterations
        """

        # Initialize globals
        self._default_cgroup = None
        self._cgroup = None

        # Setup test configuration
        if isinstance(tests_conf, dict):
            logging.info('%14s - Loading custom (inline) test configuration',
                    'Target')
            self._tests_conf = tests_conf
        elif isinstance(tests_conf, str):
            logging.info('%14s - Loading custom (file) test configuration',
                    'Target')
            json_conf = JsonConf(tests_conf)
            self._tests_conf = json_conf.load()
        else:
            raise ValueError('tests_conf must be either a dictionary or a filepath')

        # Check for mandatory configurations
        if 'confs' not in self._tests_conf or not self._tests_conf['confs']:
            raise ValueError(
                    'Configuration error: missing \'confs\' definitions')
        if 'wloads' not in self._tests_conf or not self._tests_conf['wloads']:
            raise ValueError(
                    'Configuration error: missing \'wloads\' definitions')

        # Setup devlib to access the configured target
        self.te = TestEnv(target_conf, tests_conf)
        self.target = self.te.target

        # Compute total number of experiments
        self._exp_count = self._tests_conf['iterations'] \
                * len(self._tests_conf['wloads']) \
                * len(self._tests_conf['confs'])

        self._print_section('Executor', 'Experiments configuration')

        logging.info('%14s - Configured to run:', 'Executor')

        logging.info('%14s -   %3d target configurations:',
                     'Executor', len(self._tests_conf['confs']))
        target_confs = [conf['tag'] for conf in self._tests_conf['confs']]
        target_confs = ', '.join(target_confs)
        logging.info('%14s -       %s', 'Executor', target_confs)

        logging.info('%14s -   %3d workloads (%d iterations each)',
                     'Executor', len(self._tests_conf['wloads']),
                     self._tests_conf['iterations'])
        wload_confs = ', '.join(self._tests_conf['wloads'])
        logging.info('%14s -       %s', 'Executor', wload_confs)

        logging.info('%14s - Total: %d experiments',
                     'Executor', self._exp_count)

        logging.info('%14s - Results will be collected under:', 'Executor')
        logging.info('%14s -       %s', 'Executor', self.te.res_dir)

    def run(self):
        self._print_section('Executor', 'Experiments execution')

        # Run all the configured experiments
        exp_idx = 1
        for tc in self._tests_conf['confs']:
            # TARGET: configuration
            if not self._target_configure(tc):
                continue
            for wl_idx in self._tests_conf['wloads']:
                # TEST: configuration
                wload = self._wload_init(tc, wl_idx)
                for itr_idx in range(1, self._tests_conf['iterations']+1):
                    # WORKLOAD: execution
                    self._wload_run(exp_idx, tc, wl_idx, wload, itr_idx)
                    exp_idx += 1

        self._print_section('Executor', 'Experiments execution completed')
        logging.info('%14s - Results available in:', 'Executor')
        logging.info('%14s -       %s', 'Executor', self.te.res_dir)


################################################################################
# Target Configuration
################################################################################

    def _cgroups_init(self, tc):
        self._default_cgroup = None
        if 'cgroups' not in tc:
            return True
        if 'cgroups' not in self.target.modules:
            raise RuntimeError('CGroups module not available. Please ensure '
                               '"cgroups" is listed in your target/test modules')
        logging.info(r'%14s - Initialize CGroups support...', 'CGroups')
        errors = False
        for kind in tc['cgroups']['conf']:
            logging.info(r'%14s - Setup [%s] controller...',
                    'CGroups', kind)
            controller = self.target.cgroups.controller(kind)
            if not controller:
                logging.warning(r'%14s - CGroups controller [%s] NOT available',
                        'CGroups', kind)
                errors = True
        return not errors

    def _setup_kernel(self, tc):
        # Deploy kernel on the device
        self.te.install_kernel(tc, reboot=True)
        # Setup the rootfs for the experiments
        self._setup_rootfs(tc)

    def _setup_sched_features(self, tc):
        if 'sched_features' not in tc:
            logging.debug('%14s - Configuration not provided', 'SchedFeatures')
            return
        feats = tc['sched_features'].split(",")
        for feat in feats:
            logging.info('%14s - Set scheduler feature: %s',
                         'SchedFeatures', feat)
            self.target.execute('echo {} > /sys/kernel/debug/sched_features'.format(feat))

    def _setup_rootfs(self, tc):
        # Initialize CGroups if required
        self._cgroups_init(tc)
        # Setup target folder for experiments execution
        self.te.run_dir = os.path.join(
                self.target.working_directory, TGT_RUN_DIR)
        # Create run folder as tmpfs
        logging.debug('%14s - Setup RT-App run folder [%s]...',
                'TargetSetup', self.te.run_dir)
        self.target.execute('[ -d {0} ] || mkdir {0}'\
                .format(self.te.run_dir), as_root=True)
        self.target.execute(
                'grep schedtest /proc/mounts || '\
                '  mount -t tmpfs -o size=1024m {} {}'\
                .format('schedtest', self.te.run_dir),
                as_root=True)

    def _setup_cpufreq(self, tc):
        if 'cpufreq' not in tc:
            logging.warning(r'%14s - governor not specified, '\
                    'using currently configured governor',
                    'CPUFreq')
            return

        cpufreq = tc['cpufreq']
        logging.info(r'%14s - Configuring all CPUs to use [%s] governor',
                'CPUFreq', cpufreq['governor'])

        self.target.cpufreq.set_all_governors(cpufreq['governor'])

        if 'params' in cpufreq:
            logging.info(r'%14s - governor params: %s',
                    'CPUFreq', str(cpufreq['params']))
            for cpu in self.target.list_online_cpus():
                self.target.cpufreq.set_governor_tunables(
                        cpu,
                        cpufreq['governor'],
                        **cpufreq['params'])

    def _setup_cgroups(self, tc):
        if 'cgroups' not in tc:
            return True
        # Setup default CGroup to run tasks into
        if 'default' in tc['cgroups']:
            self._default_cgroup = tc['cgroups']['default']
        # Configure each required controller
        if 'conf' not in tc['cgroups']:
            return True
        errors = False
        for kind in tc['cgroups']['conf']:
            controller = self.target.cgroups.controller(kind)
            if not controller:
                logging.warning(r'%14s - Configuration error: '\
                        '[%s] controller NOT supported',
                        'CGroups', kind)
                errors = True
                continue
            self._setup_controller(tc, controller)
        return not errors

    def _setup_controller(self, tc, controller):
        kind = controller.kind
        # Configure each of the required groups for that controller
        errors = False
        for name in tc['cgroups']['conf'][controller.kind]:
            if name[0] != '/':
                raise ValueError('Wrong CGroup name [{}]. '
                                 'CGroup names must start with "/".'\
                                 .format(name))
            group = controller.cgroup(name)
            if not group:
                logging.warning(r'%14s - Configuration error: '\
                        '[%s/%s] cgroup NOT available',
                        'CGroups', kind, name)
                errors = True
                continue
            self._setup_group(tc, group)
        return not errors

    def _setup_group(self, tc, group):
        kind = group.controller.kind
        name = group.name
        # Configure each required attribute
        group.set(**tc['cgroups']['conf'][kind][name])

    def _target_configure(self, tc):
        self._print_header('TargetConfig',
                r'configuring target for [{}] experiments'\
                .format(tc['tag']))
        self._setup_kernel(tc)
        self._setup_sched_features(tc)
        self._setup_cpufreq(tc)
        return self._setup_cgroups(tc)

    def _target_conf_flag(self, tc, flag):
        if 'flags' not in tc:
            has_flag = False
        else:
            has_flag = flag in tc['flags']
        logging.debug('%14s - Check if target conf [%s] has flag [%s]: %s',
                'TargetConf', tc['tag'], flag, has_flag)
        return has_flag


################################################################################
# Workload Setup and Execution
################################################################################

    def _wload_cpus(self, wl_idx, wlspec):
        if not 'cpus' in wlspec['conf']:
            return None
        cpus = wlspec['conf']['cpus']

        if type(cpus) == list:
            return cpus
        if type(cpus) == int:
            return [cpus]

        # SMP target (or not bL module loaded)
        if not hasattr(self.target, 'bl'):
            if 'first' in cpus:
                return [ self.target.list_online_cpus()[0] ]
            if 'last' in cpus:
                return [ self.target.list_online_cpus()[-1] ]
            return self.target.list_online_cpus()

        # big.LITTLE target
        if cpus.startswith('littles'):
            if 'first' in cpus:
                return [ self.target.bl.littles_online[0] ]
            if 'last' in cpus:
                return [ self.target.bl.littles_online[-1] ]
            return self.target.bl.littles_online
        if cpus.startswith('bigs'):
            if 'first' in cpus:
                return [ self.target.bl.bigs_online[0] ]
            if 'last' in cpus:
                return [ self.target.bl.bigs_online[-1] ]
            return self.target.bl.bigs_online
        raise ValueError('Configuration error - '
                'unsupported [{}] \'cpus\' value for [{}] '\
                'workload specification'\
                .format(cpus, wl_idx))

    def _wload_task_idxs(self, wl_idx, tasks):
        if type(tasks) == int:
            return range(tasks)
        if tasks == 'cpus':
            return range(len(self.target.core_names))
        if tasks == 'little':
            return range(len([t
                for t in self.target.core_names
                if t == self.target.little_core]))
        if tasks == 'big':
            return range(len([t
                for t in self.target.core_names
                if t == self.target.big_core]))
        raise ValueError('Configuration error - '
                'unsupported \'tasks\' value for [{}] '\
                'RT-App workload specification'\
                .format(wl_idx))

    def _wload_rtapp(self, wl_idx, wlspec, cpus):
        conf = wlspec['conf']
        logging.debug(r'%14s - Configuring [%s] rt-app...',
                'RTApp', conf['class'])

        # Setup a default "empty" task name prefix
        if 'prefix' not in conf:
            conf['prefix'] = 'task_'

        # Setup a default loadref CPU
        loadref = None
        if 'loadref' in wlspec:
            loadref = wlspec['loadref']

        if conf['class'] == 'profile':
            params = {}
            # Load each task specification
            for task_name in conf['params']:
                task = conf['params'][task_name]
                task_name = conf['prefix'] + task_name
                if task['kind'] not in wlgen.__dict__:
                    logging.error(r'%14s - RTA task of kind [%s] not supported',
                            'RTApp', task['kind'])
                    raise ValueError('Configuration error - '
                        'unsupported \'kind\' value for task [{}] '\
                        'in RT-App workload specification'\
                        .format(task))
                task_ctor = getattr(wlgen, task['kind'])
                params[task_name] = task_ctor(**task['params']).get()
            rtapp = wlgen.RTA(self.target,
                        wl_idx, calibration = self.te.calibration())
            rtapp.conf(kind='profile', params=params, loadref=loadref,
                    cpus=cpus, run_dir=self.te.run_dir)
            return rtapp

        if conf['class'] == 'periodic':
            task_idxs = self._wload_task_idxs(wl_idx, conf['tasks'])
            params = {}
            for idx in task_idxs:
                task = conf['prefix'] + str(idx)
                params[task] = wlgen.Periodic(**conf['params']).get()
            rtapp = wlgen.RTA(self.target,
                        wl_idx, calibration = self.te.calibration())
            rtapp.conf(kind='profile', params=params, loadref=loadref,
                    cpus=cpus, run_dir=self.te.run_dir)
            return rtapp

        if conf['class'] == 'custom':
            rtapp = wlgen.RTA(self.target,
                        wl_idx, calibration = self.te.calib)
            rtapp.conf(kind='custom',
                    params=conf['json'],
                    duration=conf['duration'],
                    loadref=loadref,
                    cpus=cpus, run_dir=self.te.run_dir)
            return rtapp

        raise ValueError('Configuration error - '
                'unsupported \'class\' value for [{}] '\
                'RT-App workload specification'\
                .format(wl_idx))

    def _wload_perf_bench(self, wl_idx, wlspec, cpus):
        conf = wlspec['conf']
        logging.debug(r'%14s - Configuring perf_message...',
                'PerfMessage')

        if conf['class'] == 'messaging':
            perf_bench = wlgen.PerfMessaging(self.target, wl_idx)
            perf_bench.conf(**conf['params'])
            return perf_bench

        if conf['class'] == 'pipe':
            perf_bench = wlgen.PerfPipe(self.target, wl_idx)
            perf_bench.conf(**conf['params'])
            return perf_bench

        raise ValueError('Configuration error - '\
                'unsupported \'class\' value for [{}] '\
                'perf bench workload specification'\
                .format(wl_idx))

    def _wload_conf(self, wl_idx, wlspec):

        # CPUS: setup execution on CPUs if required by configuration
        cpus = self._wload_cpus(wl_idx, wlspec)

        # CGroup: setup CGroups if required by configuration
        self._cgroup = self._default_cgroup
        if 'cgroup' in wlspec:
            if 'cgroups' not in self.target.modules:
                raise RuntimeError('Target does not support CGroups, or CGroups are '
                                   'not configured for the current test configuration')
            self._cgroup = wlspec['cgroup']

        if wlspec['type'] == 'rt-app':
            return self._wload_rtapp(wl_idx, wlspec, cpus)
        if wlspec['type'] == 'perf_bench':
            return self._wload_perf_bench(wl_idx, wlspec, cpus)


        raise ValueError('Configuration error - '
                'unsupported \'type\' value for [{}] '\
                'workload specification'\
                .format(wl_idx))

    def _wload_init(self, tc, wl_idx):
        tc_idx = tc['tag']

        # Configure the test workload
        wlspec = self._tests_conf['wloads'][wl_idx]
        wload = self._wload_conf(wl_idx, wlspec)

        # Keep track of platform configuration
        self.te.test_dir = '{}/{}:{}:{}'\
            .format(self.te.res_dir, wload.wtype, tc_idx, wl_idx)
        os.system('mkdir -p ' + self.te.test_dir)
        self.te.platform_dump(self.te.test_dir)

        # Keep track of kernel configuration and version
        config = self.target.config
        with gzip.open(os.path.join(self.te.test_dir, 'kernel.config'), 'wb') as fh:
            fh.write(config.text)
        output = self.target.execute('{} uname -a'\
                .format(self.target.busybox))
        with open(os.path.join(self.te.test_dir, 'kernel.version'), 'w') as fh:
            fh.write(output)

        return wload

    def _wload_run_init(self, run_idx):
        self.te.out_dir = '{}/{}'\
                .format(self.te.test_dir, run_idx)
        logging.debug(r'%14s - out_dir [%s]', 'Executor', self.te.out_dir)
        os.system('mkdir -p ' + self.te.out_dir)

        logging.debug(r'%14s - cleanup target output folder', 'Executor')

        target_dir = self.target.working_directory
        logging.debug('%14s - setup target directory [%s]',
                'Executor', target_dir)

    def _wload_run(self, exp_idx, tc, wl_idx, wload, run_idx):
        tc_idx = tc['tag']

        self._print_title('Executor', 'Experiment {}/{}, [{}:{}] {}/{}'\
                .format(exp_idx, self._exp_count,
                        tc_idx, wl_idx,
                        run_idx, self._tests_conf['iterations']))

        # Setup local results folder
        self._wload_run_init(run_idx)

        # FTRACE: start (if a configuration has been provided)
        if self.te.ftrace and self._target_conf_flag(tc, 'ftrace'):
            logging.warning('%14s - FTrace events collection enabled', 'Executor')
            self.te.ftrace.start()

        # ENERGY: start sampling
        if self.te.emeter:
            self.te.emeter.reset()

        # WORKLOAD: Run the configured workload
        wload.run(out_dir=self.te.out_dir, cgroup=self._cgroup)

        # ENERGY: collect measurements
        if self.te.emeter:
            self.te.emeter.report(self.te.out_dir)

        # FTRACE: stop and collect measurements
        if self.te.ftrace and self._target_conf_flag(tc, 'ftrace'):
            self.te.ftrace.stop()

            trace_file = self.te.out_dir + '/trace.dat'
            self.te.ftrace.get_trace(trace_file)
            logging.info(r'%14s - Collected FTrace binary trace:', 'Executor')
            logging.info(r'%14s -    %s', 'Executor',
                         trace_file.replace(self.te.res_dir, '<res_dir>'))

            stats_file = self.te.out_dir + '/trace_stat.json'
            self.te.ftrace.get_stats(stats_file)
            logging.info(r'%14s - Collected FTrace function profiling:', 'Executor')
            logging.info(r'%14s -    %s', 'Executor',
                         stats_file.replace(self.te.res_dir, '<res_dir>'))

        self._print_footer('Executor')

################################################################################
# Utility Functions
################################################################################

    def _print_section(self, tag, message):
        logging.info('')
        logging.info(FMT_SECTION)
        logging.info(r'%14s - %s', tag, message)
        logging.info(FMT_SECTION)

    def _print_header(self, tag, message):
        logging.info('')
        logging.info(FMT_HEADER)
        logging.info(r'%14s - %s', tag, message)

    def _print_title(self, tag, message):
        logging.info(FMT_TITLE)
        logging.info(r'%14s - %s', tag, message)

    def _print_footer(self, tag, message=None):
        if message:
            logging.info(r'%14s - %s', tag, message)
        logging.info(FMT_FOOTER)
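
A hedged usage sketch for the class above: construct it, then call run(). The JSON path is a hypothetical placeholder; as the constructor shows, tests_conf may also be an inline dictionary.

# tests_conf can be a dict or a path to a JSON file loadable by JsonConf
executor = Executor(target_conf=None, tests_conf='path/to/tests_conf.json')  # hypothetical path
executor.run()  # iterates over every (conf, wload, iteration) combination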
Example 12
class TestIssue(unittest.TestCase):
    def setUp(self):
        self.env = TestEnv()
        self.tracker = self.env.tracker

    def tearDown(self):
        self.env.cleanup()

    def test_setattr(self):
        issue = Issue(self.tracker)
        issue.title = "test title"
        # Does the __setattr__ method work?
        # i.e.: setting issue.title should set issue.fields['title']
        assert issue.title == issue.fields["title"]

    def test_getattribute(self):
        issue = Issue(self.tracker)
        issue.fields["title"] = "test title"
        # Does the __getattribute__ method work?
        # i.e.: issue.title should return issue.fields['title']
        assert issue.fields["title"] == issue.title

    def test_eq(self):
        issue1 = Issue(self.tracker)
        issue1.save()
        issue2 = Issue(self.tracker, issue1.id)
        assert issue1 == issue2

    def test_ne(self):
        issue1 = Issue(self.tracker)
        issue1.save()
        issue2 = Issue(self.tracker)
        issue2.save()
        assert issue1 != issue2

    def test_comments(self):
        issue = Issue(self.tracker)
        issue.save()
        # Make some comments
        comments = [Comment(issue) for i in range(20)]
        for c in comments:
            c.save()
        assert type(issue.comments()) is list
        assert len(issue.comments()) == 20
        assert len(issue.comments(n=15)) == 15

    def test_comment(self):
        issue = Issue(self.tracker)
        issue.save()
        c = Comment(issue)
        c.save()
        assert type(issue.comment(c.id)) is Comment
        assert issue.comment(c.id).fields == c.fields

    def test_save(self):
        issue = Issue(self.tracker)
        issue.title = "test title"
        issue.save()

        # Get the path to the issue and assert that it's there.
        path = self.tracker.get_issue_path(issue.id)
        assert os.path.exists(path)

        # Does the issue initialized from file match the original?
        issue_clone = Issue(self.tracker, issue.id)
        assert issue_clone.fields == issue.fields

    def test_timestamps(self):
        issue = Issue(self.tracker)
        issue.title = "test title"
        issue.save()
        assert issue.created == issue.updated
Example 13
class DatabaseTest(unittest.TestCase):
    '''Tests the `SQLiteIssueDatabase` class.'''

    def setUp(self):
        self.env = TestEnv()

    def tearDown(self):
        self.env.cleanup()

    def test_constructor(self):
        '''Test the __init__ method.'''
        path = os.path.join(self.env.tracker.paths['admin'],
                            'cache', 'tracker.db')

        # Init a database when one does not exist:
        db1 = Database(self.env.tracker)
        assert os.path.exists(path)
        assert db1.conn

        # Init a database when one already exists:
        db2 = Database(self.env.tracker)
        assert os.path.exists(path)
        assert db2.conn

    def test__apply_working_tree(self):
        '''Tests the `_apply_working_tree` method'''
        pass

    def test__insert_many_from_shas(self):
        '''Tests the `_insert_many_from_shas` method'''
        pass

    def test__integrity_check(self):
        '''Tests the `_integrity_check` method'''
        pass

    def test__replicate(self):
        '''Tests the `_replicate` method'''
        pass

    def test__set_update(self):
        '''Tests the `_set_update` method'''
        pass

    def test_insert(self):
        '''Tests the `insert` method'''
        db = Database(self.env.tracker)
        # make and insert the issue
        issue1 = Issue(self.env.tracker)
        issue1.content = 'test'
        issue1.title = 'Test'
        issue1.save()
        db.insert(issue1)
        rows = db.select().execute()
        issue2 = [Issue(self.env.tracker, r['id']) for r in rows][0]
        # make sure the issues are equal which triggers the __eq__ method.
        assert issue1 == issue2

    def test_insert_many(self):
        '''Tests the `insert_many` method'''
        db = Database(self.env.tracker)
        issues = []
        for i in range(20):
            issue = Issue(self.env.tracker)
            issue.save()
            issues.append(issue)
        db.insert_many(issues)
        rows = db.select().execute()
        db_issues = [Issue(self.env.tracker, r['id']) for r in rows]
        # quick check to make sure they're all there.
        assert len(db_issues) == len(issues)

    def test_select(self):
        '''Tests the `select` method'''
        db = Database(self.env.tracker)
        # should just return an sqlalchemy.sql.Select object.
        assert type(db.select()) is Select
Example 14
 def setUp(self):
     self.env = TestEnv()
Example 15
 def setUp(self):
     # a random path to init trackers at
     self.path = get_uuid() 
     self.env = TestEnv(False)
Example 16
class TrackerTest(unittest.TestCase):
    def setUp(self):
        # a random path to init trackers at
        self.path = get_uuid() 
        self.env = TestEnv(False)

    def tearDown(self):
        # delete the tracker at self.path
        if os.path.exists(self.path):
            self.env.rm_tracker(self.path)

    def test_constructor(self):
        # supplying non-existent paths should raise OSError
        with self.assertRaises(OSError):
            Tracker(self.path)

        # supplying a valid path should work.
        t1 = Tracker.new(self.path)
        t2 = Tracker(t1.paths['root'])
        assert type(t2) is Tracker

    def test_new(self):
        # verify that new() returns a Tracker object.
        t = Tracker.new(self.path)
        assert type(t) is Tracker

        # verify that the tracker was created on disk.
        assert os.path.isdir(t.paths['root'])

    def test_read(self):
        # stub
        pass

    def test_update(self):
        # stub
        pass

    def test_issue(self):
        t = Tracker.new(self.path)
        i1 = Issue(t)
        i1.save()
        i2 = t.issue(i1.id)
        # verify that issue() returns an Issue
        assert type(i2) is Issue
        # verify that the issues match
        assert i1.fields == i2.fields

        # invalid SHAs should raise BadReference
        invalid_sha = get_uuid()
        with self.assertRaises(BadReference):
            t.issue(invalid_sha)
        
    def test_issues(self):
        t = Tracker.new(self.path)
        # make a bunch of issues
        issues = [Issue(t) for i in range(50)]

    def test_get_issue_path(self):
        pass

    def test_get_issues(self):
        pass
Example 17
 def setUp(self):
     self.env = TestEnv()
     self.tracker = self.env.tracker
Example 18
class DatabaseTest(unittest.TestCase):
    '''Tests the `SQLiteIssueDatabase` class.'''
    def setUp(self):
        self.env = TestEnv()

    def tearDown(self):
        self.env.cleanup()

    def test_constructor(self):
        '''Test the __init__ method.'''
        path = os.path.join(self.env.tracker.paths['admin'], 'cache',
                            'tracker.db')

        # Init a database when one does not exist:
        db1 = Database(self.env.tracker)
        assert os.path.exists(path)
        assert db1.conn

        # Init a database when one already exists:
        db2 = Database(self.env.tracker)
        assert os.path.exists(path)
        assert db2.conn

    def test__apply_working_tree(self):
        '''Tests the `_apply_working_tree` method'''
        pass

    def test__insert_many_from_shas(self):
        '''Tests the `_insert_many_from_shas` method'''
        pass

    def test__integrity_check(self):
        '''Tests the `_integrity_check` method'''
        pass

    def test__replicate(self):
        '''Tests the `_replicate` method'''
        pass

    def test__set_update(self):
        '''Tests the `_set_update` method'''
        pass

    def test_insert(self):
        '''Tests the `insert` method'''
        db = Database(self.env.tracker)
        # make and insert the issue
        issue1 = Issue(self.env.tracker)
        issue1.content = 'test'
        issue1.title = 'Test'
        issue1.save()
        db.insert(issue1)
        rows = db.select().execute()
        issue2 = [Issue(self.env.tracker, r['id']) for r in rows][0]
        # make sure the issues are equal which triggers the __eq__ method.
        assert issue1 == issue2

    def test_insert_many(self):
        '''Tests the `insert_many` method'''
        db = Database(self.env.tracker)
        issues = []
        for i in range(20):
            issue = Issue(self.env.tracker)
            issue.save()
            issues.append(issue)
        db.insert_many(issues)
        rows = db.select().execute()
        db_issues = [Issue(self.env.tracker, r['id']) for r in rows]
        # quick check to make sure they're all there.
        assert len(db_issues) == len(issues)

    def test_select(self):
        '''Tests the `select` method'''
        db = Database(self.env.tracker)
        # should just return an sqlalchemy.sql.Select object.
        assert type(db.select()) is Select
Example 19
class TestIssue(unittest.TestCase):
    def setUp(self):
        self.env = TestEnv()
        self.tracker = self.env.tracker

    def tearDown(self):
        self.env.cleanup()

    def test_setattr(self):
        issue = Issue(self.tracker)
        issue.title = 'test title'
        # Does the __setattr__ method work?
        # i.e.: setting issue.title should set issue.fields['title']
        assert issue.title == issue.fields['title']

    def test_getattribute(self):
        issue = Issue(self.tracker)
        issue.fields['title'] = 'test title'
        # Does the __getattribute__ method work?
        # i.e.: issue.title should return issue.fields['title']
        assert issue.fields['title'] == issue.title

    def test_eq(self):
        issue1 = Issue(self.tracker)
        issue1.save()
        issue2 = Issue(self.tracker, issue1.id)
        assert issue1 == issue2

    def test_ne(self):
        issue1 = Issue(self.tracker)
        issue1.save()
        issue2 = Issue(self.tracker)
        issue2.save()
        assert issue1 != issue2

    def test_comments(self):
        issue = Issue(self.tracker)
        issue.save()
        # Make some comments
        comments = [Comment(issue) for i in range(20)]
        for c in comments:
            c.save()
        assert type(issue.comments()) is list
        assert len(issue.comments()) == 20
        assert len(issue.comments(n=15)) == 15

    def test_comment(self):
        issue = Issue(self.tracker)
        issue.save()
        c = Comment(issue)
        c.save()
        assert type(issue.comment(c.id)) is Comment
        assert issue.comment(c.id).fields == c.fields

    def test_save(self):
        issue = Issue(self.tracker)
        issue.title = 'test title'
        issue.save()

        # Get the path to the issue and assert that it's there.
        path = self.tracker.get_issue_path(issue.id)
        assert os.path.exists(path)

        # Does the issue initialized from file match the original?
        issue_clone = Issue(self.tracker, issue.id)
        assert issue_clone.fields == issue.fields

    def test_timestamps(self):
        issue = Issue(self.tracker)
        issue.title = 'test title'
        issue.save()
        assert issue.created == issue.updated
Example 20
 def setUp(self):
     self.env = TestEnv()
     self.tracker = self.env.tracker
     self.issue = Issue(self.tracker)
Example 21
class Executor():

    def __init__(self, target_conf=None, tests_conf=None):
        """
        Tests Executor

        A tests executor is a module which supports the execution of a
        configured set of experiments. Each experiment is composed of:
        - a target configuration
        - a workload to execute

        The executor module can be configured to run a set of workloads
        (wloads) in each different target configuration of a specified set
        (confs). These wloads and confs can be specified by the "tests_conf"
        input dictionary.

        All the results generated by each experiment will be collected in a
        results folder which is named according to this template:
            results/<test_id>/<wltype>:<conf>:<wload>/<run_id>
        where:
        - <test_id> : the "tid" defined by the tests_conf, or a timestamp
                      based folder in case "tid" is not specified
        - <wltype>  : the class of workload executed, e.g. rtapp or sched_perf
        - <conf>    : the identifier of one of the specified configurations
        - <wload>   : the identifier of one of the specified workloads
        - <run_id>  : the progressive execution number from 1 up to the
                      specified iterations
        """

        # Initialize globals
        self._default_cgroup = None
        self._cgroup = None

        # Setup test configuration
        if isinstance(tests_conf, dict):
            logging.info('%14s - Loading custom (inline) test configuration',
                    'Target')
            self._tests_conf = tests_conf
        elif isinstance(tests_conf, str):
            logging.info('%14s - Loading custom (file) test configuration',
                    'Target')
            json_conf = JsonConf(tests_conf)
            self._tests_conf = json_conf.load()
        else:
            raise ValueError('tests_conf must be either a dictionary or a filepath')

        # Check for mandatory configurations
        if 'confs' not in self._tests_conf or not self._tests_conf['confs']:
            raise ValueError(
                    'Configuration error: missing \'confs\' definitions')
        if 'wloads' not in self._tests_conf or not self._tests_conf['wloads']:
            raise ValueError(
                    'Configuration error: missing \'wloads\' definitions')

        # Setup devlib to access the configured target
        self.te = TestEnv(target_conf, tests_conf)
        self.target = self.te.target

        # Compute total number of experiments
        self._exp_count = self._tests_conf['iterations'] \
                * len(self._tests_conf['wloads']) \
                * len(self._tests_conf['confs'])

        self._print_section('Executor', 'Experiments configuration')

        logging.info('%14s - Configured to run:', 'Executor')

        logging.info('%14s -   %3d target configurations:',
                     'Executor', len(self._tests_conf['confs']))
        target_confs = [conf['tag'] for conf in self._tests_conf['confs']]
        target_confs = ', '.join(target_confs)
        logging.info('%14s -       %s', 'Executor', target_confs)

        logging.info('%14s -   %3d workloads (%d iterations each)',
                     'Executor', len(self._tests_conf['wloads']),
                     self._tests_conf['iterations'])
        wload_confs = ', '.join(self._tests_conf['wloads'])
        logging.info('%14s -       %s', 'Executor', wload_confs)

        logging.info('%14s - Total: %d experiments',
                     'Executor', self._exp_count)

        logging.info('%14s - Results will be collected under:', 'Executor')
        logging.info('%14s -       %s', 'Executor', self.te.res_dir)

    def run(self):
        self._print_section('Executor', 'Experiments execution')

        # Run all the configured experiments
        exp_idx = 1
        for tc in self._tests_conf['confs']:
            # TARGET: configuration
            if not self._target_configure(tc):
                continue
            for wl_idx in self._tests_conf['wloads']:
                # TEST: configuration
                wload = self._wload_init(tc, wl_idx)
                for itr_idx in range(1, self._tests_conf['iterations']+1):
                    # WORKLOAD: execution
                    self._wload_run(exp_idx, tc, wl_idx, wload, itr_idx)
                    exp_idx += 1

        self._print_section('Executor', 'Experiments execution completed')
        logging.info('%14s - Results available in:', 'Executor')
        logging.info('%14s -       %s', 'Executor', self.te.res_dir)


################################################################################
# Target Configuration
################################################################################

    def _cgroups_init(self, tc):
        self._default_cgroup = None
        if 'cgroups' not in tc:
            return True
        if 'cgroups' not in self.target.modules:
            raise RuntimeError('CGroups module not available. Please ensure '
                               '"cgroups" is listed in your target/test modules')
        logging.info(r'%14s - Initialize CGroups support...', 'CGroups')
        errors = False
        for kind in tc['cgroups']['conf']:
            logging.info(r'%14s - Setup [%s] controller...',
                    'CGroups', kind)
            controller = self.target.cgroups.controller(kind)
            if not controller:
                logging.warning(r'%14s - CGroups controller [%s] NOT available',
                        'CGroups', kind)
                errors = True
        return not errors

    def _setup_kernel(self, tc):
        # Deploy kernel on the device
        self.te.install_kernel(tc, reboot=True)
        # Setup the rootfs for the experiments
        self._setup_rootfs(tc)

    def _setup_sched_features(self, tc):
        if 'sched_features' not in tc:
            logging.debug('%14s - Configuration not provided', 'SchedFeatures')
            return
        feats = tc['sched_features'].split(",")
        for feat in feats:
            logging.info('%14s - Set scheduler feature: %s',
                         'SchedFeatures', feat)
            self.target.execute('echo {} > /sys/kernel/debug/sched_features'.format(feat))

    def _setup_rootfs(self, tc):
        # Initialize CGroups if required
        self._cgroups_init(tc)
        # Setup target folder for experiments execution
        self.te.run_dir = os.path.join(
                self.target.working_directory, TGT_RUN_DIR)
        # Create run folder as tmpfs
        logging.debug('%14s - Setup RT-App run folder [%s]...',
                'TargetSetup', self.te.run_dir)
        self.target.execute('[ -d {0} ] || mkdir {0}'\
                .format(self.te.run_dir), as_root=True)
        self.target.execute(
                'grep schedtest /proc/mounts || '\
                '  mount -t tmpfs -o size=1024m {} {}'\
                .format('schedtest', self.te.run_dir),
                as_root=True)

    def _setup_cpufreq(self, tc):
        if 'cpufreq' not in tc:
            logging.warning(r'%14s - governor not specified, '\
                    'using currently configured governor',
                    'CPUFreq')
            return

        cpufreq = tc['cpufreq']
        logging.info(r'%14s - Configuring all CPUs to use [%s] governor',
                'CPUFreq', cpufreq['governor'])

        self.target.cpufreq.set_all_governors(cpufreq['governor'])

        if 'params' in cpufreq:
            logging.info(r'%14s - governor params: %s',
                    'CPUFreq', str(cpufreq['params']))
            for cpu in self.target.list_online_cpus():
                self.target.cpufreq.set_governor_tunables(
                        cpu,
                        cpufreq['governor'],
                        **cpufreq['params'])

    def _setup_cgroups(self, tc):
        if 'cgroups' not in tc:
            return True
        # Setup default CGroup to run tasks into
        if 'default' in tc['cgroups']:
            self._default_cgroup = tc['cgroups']['default']
        # Configure each required controller
        if 'conf' not in tc['cgroups']:
            return True
        errors = False
        for kind in tc['cgroups']['conf']:
            controller = self.target.cgroups.controller(kind)
            if not controller:
                logging.warning(r'%14s - Configuration error: '\
                        '[%s] controller NOT supported',
                        'CGroups', kind)
                errors = True
                continue
            self._setup_controller(tc, controller)
        return not errors

    def _setup_controller(self, tc, controller):
        kind = controller.kind
        # Configure each of the required groups for that controller
        errors = False
        for name in tc['cgroups']['conf'][controller.kind]:
            if name[0] != '/':
                raise ValueError('Wrong CGroup name [{}]. '
                                 'CGroup names must start with "/".'\
                                 .format(name))
            group = controller.cgroup(name)
            if not group:
                logging.warning(r'%14s - Configuration error: '\
                        '[%s/%s] cgroup NOT available',
                        'CGroups', kind, name)
                errors = True
                continue
            self._setup_group(tc, group)
        return not errors

    def _setup_group(self, tc, group):
        kind = group.controller.kind
        name = group.name
        # Configure each required attribute
        group.set(**tc['cgroups']['conf'][kind][name])

    def _target_configure(self, tc):
        self._print_header('TargetConfig',
                r'configuring target for [{}] experiments'\
                .format(tc['tag']))
        self._setup_kernel(tc)
        self._setup_sched_features(tc)
        self._setup_cpufreq(tc)
        return self._setup_cgroups(tc)

    def _target_conf_flag(self, tc, flag):
        if 'flags' not in tc:
            has_flag = False
        else:
            has_flag = flag in tc['flags']
        logging.debug('%14s - Check if target conf [%s] has flag [%s]: %s',
                'TargetConf', tc['tag'], flag, has_flag)
        return has_flag


################################################################################
# Workload Setup and Execution
################################################################################

    def _wload_cpus(self, wl_idx, wlspec):
        if not 'cpus' in wlspec['conf']:
            return None
        cpus = wlspec['conf']['cpus']

        if type(cpus) == list:
            return cpus
        if type(cpus) == int:
            return [cpus]

        # SMP target (or not bL module loaded)
        if not hasattr(self.target, 'bl'):
            if 'first' in cpus:
                return [ self.target.list_online_cpus()[0] ]
            if 'last' in cpus:
                return [ self.target.list_online_cpus()[-1] ]
            return self.target.list_online_cpus()

        # big.LITTLE target
        if cpus.startswith('littles'):
            if 'first' in cpus:
                return [ self.target.bl.littles_online[0] ]
            if 'last' in cpus:
                return [ self.target.bl.littles_online[-1] ]
            return self.target.bl.littles_online
        if cpus.startswith('bigs'):
            if 'first' in cpus:
                return [ self.target.bl.bigs_online[0] ]
            if 'last' in cpus:
                return [ self.target.bl.bigs_online[-1] ]
            return self.target.bl.bigs_online
        raise ValueError('Configuration error - '
                'unsupported [{}] \'cpus\' value for [{}] '\
                'workload specification'\
                .format(cpus, wl_idx))

    def _wload_task_idxs(self, wl_idx, tasks):
        if type(tasks) == int:
            return range(tasks)
        if tasks == 'cpus':
            return range(len(self.target.core_names))
        if tasks == 'little':
            return range(len([t
                for t in self.target.core_names
                if t == self.target.little_core]))
        if tasks == 'big':
            return range(len([t
                for t in self.target.core_names
                if t == self.target.big_core]))
        raise ValueError('Configuration error - '
                'unsupported \'tasks\' value for [{}] '\
                'RT-App workload specification'\
                .format(wl_idx))

    def _wload_rtapp(self, wl_idx, wlspec, cpus):
        conf = wlspec['conf']
        logging.debug(r'%14s - Configuring [%s] rt-app...',
                'RTApp', conf['class'])

        # Setup a default "empty" task name prefix
        if 'prefix' not in conf:
            conf['prefix'] = 'task_'

        # Setup a default loadref CPU
        loadref = None
        if 'loadref' in wlspec:
            loadref = wlspec['loadref']

        if conf['class'] == 'profile':
            params = {}
            # Load each task specification
            for task_name in conf['params']:
                task = conf['params'][task_name]
                task_name = conf['prefix'] + task_name
                if task['kind'] not in wlgen.RTA.__dict__:
                    logging.error(r'%14s - RTA task of kind [%s] not supported',
                            'RTApp', task['kind'])
                    raise ValueError('Configuration error - '
                        'unsupported \'kind\' value for task [{}] '\
                        'in RT-App workload specification'\
                        .format(task_name))
                task_ctor = getattr(wlgen.RTA, task['kind'])
                params[task_name] = task_ctor(**task['params'])
            rtapp = wlgen.RTA(self.target,
                        wl_idx, calibration = self.te.calibration())
            rtapp.conf(kind='profile', params=params, loadref=loadref,
                    cpus=cpus, run_dir=self.te.run_dir)
            return rtapp

        if conf['class'] == 'periodic':
            task_idxs = self._wload_task_idxs(wl_idx, conf['tasks'])
            params = {}
            for idx in task_idxs:
                task = conf['prefix'] + str(idx)
                params[task] = wlgen.Periodic(**conf['params']).get()
            rtapp = wlgen.RTA(self.target,
                        wl_idx, calibration = self.te.calibration())
            rtapp.conf(kind='profile', params=params, loadref=loadref,
                    cpus=cpus, run_dir=self.te.run_dir)
            return rtapp

        if conf['class'] == 'custom':
            rtapp = wlgen.RTA(self.target,
                        wl_idx, calibration = self.te.calibration())
            rtapp.conf(kind='custom',
                    params=conf['json'],
                    duration=conf['duration'],
                    loadref=loadref,
                    cpus=cpus, run_dir=self.te.run_dir)
            return rtapp

        raise ValueError('Configuration error - '
                'unsupported \'class\' value for [{}] '\
                'RT-App workload specification'\
                .format(wl_idx))
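
Putting the pieces together, a 'profile' class rt-app workload specification
could look like the sketch below. The 'periodic' kind and the empty parameter
dictionaries are assumptions: 'kind' only has to name an attribute of wlgen.RTA,
and the inner 'params' are forwarded verbatim to that constructor.

# Hypothetical rt-app workload specification consumed by _wload_rtapp().
example_rtapp_wload = {
    'type' : 'rt-app',
    'conf' : {
        'class'  : 'profile',
        'prefix' : 'task_',              # optional, defaults to 'task_'
        'params' : {
            'light' : {
                'kind'   : 'periodic',   # assumed to be an attribute of wlgen.RTA
                'params' : {},           # constructor arguments (left empty here)
            },
        },
    },
    # 'loadref' : 'big',                 # optional calibration reference
}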

    def _wload_perf_bench(self, wl_idx, wlspec, cpus):
        conf = wlspec['conf']
        logging.debug(r'%14s - Configuring [%s] perf bench...',
                'PerfBench', conf['class'])

        if conf['class'] == 'messaging':
            perf_bench = wlgen.PerfMessaging(self.target, wl_idx)
            perf_bench.conf(**conf['params'])
            return perf_bench

        if conf['class'] == 'pipe':
            perf_bench = wlgen.PerfPipe(self.target, wl_idx)
            perf_bench.conf(**conf['params'])
            return perf_bench

        raise ValueError('Configuration error - '\
                'unsupported \'class\' value for [{}] '\
                'perf bench workload specification'\
                .format(wl_idx))
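
Similarly, a perf bench workload specification only selects one of the two
supported classes; its parameters are forwarded untouched to the wlgen helper,
so they are left empty in this sketch.

# Hypothetical perf bench workload specification consumed by _wload_perf_bench().
example_perf_wload = {
    'type' : 'perf_bench',
    'conf' : {
        'class'  : 'messaging',   # or 'pipe'
        'params' : {},            # forwarded as keyword arguments to .conf()
    },
}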

    def _wload_conf(self, wl_idx, wlspec):

        # CPUS: setup execution on CPUs if required by configuration
        cpus = self._wload_cpus(wl_idx, wlspec)

        # CGroup: setup CGroups if required by configuration
        self._cgroup = self._default_cgroup
        if 'cgroup' in wlspec:
            if 'cgroups' not in self.target.modules:
                raise RuntimeError('Target does not support CGroups, or CGroups '
                                   'are not configured for the current test configuration')
            self._cgroup = wlspec['cgroup']

        if wlspec['type'] == 'rt-app':
            return self._wload_rtapp(wl_idx, wlspec, cpus)
        if wlspec['type'] == 'perf_bench':
            return self._wload_perf_bench(wl_idx, wlspec, cpus)

        raise ValueError('Configuration error - '
                'unsupported \'type\' value for [{}] '\
                'workload specification'\
                .format(wl_idx))
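
The optional 'cgroup' key simply names the CGroup the workload will later be
run into; it requires the devlib 'cgroups' module to be loaded on the target.
A sketch (the CGroup name is illustrative):

# Hypothetical workload specification with a CGroup constraint.
example_wload_with_cgroup = {
    'type'   : 'perf_bench',
    'cgroup' : '/tg1',                                  # illustrative CGroup name
    'conf'   : { 'class' : 'pipe', 'params' : {} },
}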

    def _wload_init(self, tc, wl_idx):
        tc_idx = tc['tag']

        # Configure the test workload
        wlspec = self._tests_conf['wloads'][wl_idx]
        wload = self._wload_conf(wl_idx, wlspec)

        # Keep track of platform configuration
        self.te.test_dir = '{}/{}:{}:{}'\
            .format(self.te.res_dir, wload.wtype, tc_idx, wl_idx)
        os.system('mkdir -p ' + self.te.test_dir)
        self.te.platform_dump(self.te.test_dir)

        # Keep track of kernel configuration and version
        config = self.target.config
        with gzip.open(os.path.join(self.te.test_dir, 'kernel.config'), 'wb') as fh:
            fh.write(config.text)
        output = self.target.execute('{} uname -a'\
                .format(self.target.busybox))
        with open(os.path.join(self.te.test_dir, 'kernel.version'), 'w') as fh:
            fh.write(output)

        return wload
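
As an illustration of the resulting layout (the tag and workload names below are
made up, and the 'rtapp' wtype is an assumption about the wlgen.RTA class):

# Illustration only: for tc['tag'] == 'base' and wl_idx == 'mixprof', with an
# rt-app workload whose wtype is assumed to be 'rtapp', _wload_init() creates:
#
#   <res_dir>/rtapp:base:mixprof/
#       kernel.config     gzip-compressed target kernel configuration
#       kernel.version    output of 'uname -a' on the target
#       ...               plus the platform description written by platform_dump()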

    def _wload_run_init(self, run_idx):
        self.te.out_dir = '{}/{}'\
                .format(self.te.test_dir, run_idx)
        logging.debug(r'%14s - out_dir [%s]', 'Executor', self.te.out_dir)
        os.system('mkdir -p ' + self.te.out_dir)

        logging.debug(r'%14s - cleanup target output folder', 'Executor')

        target_dir = self.target.working_directory
        logging.debug('%14s - setup target directory [%s]',
                'Executor', target_dir)

    def _wload_run(self, exp_idx, tc, wl_idx, wload, run_idx):
        tc_idx = tc['tag']

        self._print_title('Executor', 'Experiment {}/{}, [{}:{}] {}/{}'\
                .format(exp_idx, self._exp_count,
                        tc_idx, wl_idx,
                        run_idx, self._tests_conf['iterations']))

        # Setup local results folder
        self._wload_run_init(run_idx)

        # FTRACE: start (if a configuration has been provided)
        if self.te.ftrace and self._target_conf_flag(tc, 'ftrace'):
            logging.warning('%14s - FTrace events collection enabled', 'Executor')
            self.te.ftrace.start()

        # ENERGY: start sampling
        if self.te.emeter:
            self.te.emeter.reset()

        # WORKLOAD: Run the configured workload
        wload.run(out_dir=self.te.out_dir, cgroup=self._cgroup)

        # ENERGY: collect measurements
        if self.te.emeter:
            self.te.emeter.report(self.te.out_dir)

        # FTRACE: stop and collect measurements
        if self.te.ftrace and self._target_conf_flag(tc, 'ftrace'):
            self.te.ftrace.stop()

            trace_file = self.te.out_dir + '/trace.dat'
            self.te.ftrace.get_trace(trace_file)
            logging.info(r'%14s - Collected FTrace binary trace:', 'Executor')
            logging.info(r'%14s -    %s', 'Executor',
                         trace_file.replace(self.te.res_dir, '<res_dir>'))

            stats_file = self.te.out_dir + '/trace_stat.json'
            self.te.ftrace.get_stats(stats_file)
            logging.info(r'%14s - Collected FTrace function profiling:', 'Executor')
            logging.info(r'%14s -    %s', 'Executor',
                         stats_file.replace(self.te.res_dir, '<res_dir>'))

        self._print_footer('Executor')
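
When the 'ftrace' flag is set for the target configuration, each run directory
therefore ends up containing the two trace artifacts collected above; a minimal
sketch of checking for them after a run (the helper name is made up):

# Sketch: verify the FTrace artifacts collected by _wload_run() for one run.
import os

def check_run_artifacts(out_dir):
    # File names match the ones used in _wload_run() above.
    for name in ('trace.dat', 'trace_stat.json'):
        path = os.path.join(out_dir, name)
        status = 'OK' if os.path.isfile(path) else 'missing'
        print('{:16s} {}'.format(name, status))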

################################################################################
# Utility Functions
################################################################################

    def _print_section(self, tag, message):
        logging.info('')
        logging.info(FMT_SECTION)
        logging.info(r'%14s - %s', tag, message)
        logging.info(FMT_SECTION)

    def _print_header(self, tag, message):
        logging.info('')
        logging.info(FMT_HEADER)
        logging.info(r'%14s - %s', tag, message)

    def _print_title(self, tag, message):
        logging.info(FMT_TITLE)
        logging.info(r'%14s - %s', tag, message)

    def _print_footer(self, tag, message=None):
        if message:
            logging.info(r'%14s - %s', tag, message)
        logging.info(FMT_FOOTER)
my_conf = {
    # Device
    # By default the connected device is auto-detected; if more than one
    # device is attached, set the following to target a specific one.
    # "device"       : "HT6880200489",

    # Folder where all the results will be collected
    "results_dir" : "CameraFlashlight",

    # Define devlib modules to load
    "modules"     : [
        'cpufreq',      # enable CPUFreq support
    ],

    "emeter" : {
        'instrument': 'monsoon',
        'conf': { }
    },

    # Tools required by the experiments
    "tools"   : [],
}

if args.serial:
    my_conf["device"] = args.serial

# Initialize a test environment using the configuration defined above
te = TestEnv(my_conf, wipe=False)
target = te.target

results = experiment()
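
The fragment above relies on an args object and an experiment() function defined
earlier in the original script and not shown here; a minimal sketch of how the
serial option might be parsed, assuming argparse (the option name is chosen to
mirror the args.serial attribute used above):

# Minimal sketch, assuming argparse; '--serial' is a hypothetical option name.
import argparse

parser = argparse.ArgumentParser(
        description='Run the experiment on a specific device')
parser.add_argument('--serial', default=None,
        help='serial number of the device to target')
args = parser.parse_args()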
Esempio n. 23
0
 def setUp(self):
     self.env = TestEnv()
     self.tracker = self.env.tracker
     self.issue = Issue(self.tracker)