Example 1
    def setUpClass(cls):
        cls.params = {}
        cls.env = TestEnv(test_conf=TEST_CONF)
        cls.trace_file = os.path.join(cls.env.res_dir, "offload_idle_pull.dat")
        cls.log_file = os.path.join(cls.env.res_dir, "offload_idle_pull.json")
        cls.early_starters = []
        cls.migrators = []
        cls.num_tasks = len(cls.env.target.bl.bigs)
        cls.populate_tasks()
        local_setup(cls.env)
        cls.run_workload()

        cls.trace = trappy.FTrace(cls.trace_file)
        cls.m_assert = SchedMultiAssert(cls.trace,
                                        cls.env.topology,
                                        execnames=cls.migrators)
        cls.e_assert = SchedMultiAssert(cls.trace,
                                        cls.env.topology,
                                        execnames=cls.early_starters)

        all_tasks = cls.early_starters + cls.migrators
        cls.a_assert = SchedMultiAssert(cls.trace,
                                        cls.env.topology,
                                        execnames=all_tasks)
        cls.offset = cls.get_offset()

        cls.end_times = cls.calculate_end_times()
        cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w")
Example 2
    def runExperiments(cls):
        """
        Set up logging and trigger running experiments
        """
        cls.logger = logging.getLogger('LisaTest')

        cls.logger.info('Setup tests execution engine...')
        test_env = TestEnv(test_conf=cls._getTestConf())

        experiments_conf = cls._getExperimentsConf(test_env)
        cls.executor = Executor(test_env, experiments_conf)

        # Alias tests and workloads configurations
        cls.wloads = cls.executor._experiments_conf["wloads"]
        cls.confs = cls.executor._experiments_conf["confs"]

        # Alias executor objects to make less verbose tests code
        cls.te = cls.executor.te
        cls.target = cls.executor.target

        # Execute pre-experiments code defined by the test
        cls._experimentsInit()

        cls.logger.info('Experiments execution...')
        cls.executor.run()

        cls.experiments = cls.executor.experiments

        # Execute post-experiments code defined by the test
        cls._experimentsFinalize()
Example 3
    def __init__(self):
        """
        Set up logging and trigger running experiments
        """
        LisaLogging.setup()
        self._log = logging.getLogger('Benchmark')

        self._log.info('=== CommandLine parsing...')
        self.args = self._parseCommandLine()

        self._log.info('=== TestEnv setup...')
        self.bm_conf = self._getBmConf()
        self.te = TestEnv(self.bm_conf)
        self.target = self.te.target

        self._log.info('=== Initialization...')
        self.wl = self._getWorkload()
        self.out_dir = self.te.res_dir
        try:
            self.benchmarkInit()
        except:
            self._log.warning(
                'Benchmark initialization failed: execution aborted')
            raise

        self._log.info('=== Execution...')
        self.wl.run(out_dir=self.out_dir,
                    collect=self._getBmCollect(),
                    **self.bm_params)

        self._log.info('=== Finalization...')
        self.benchmarkFinalize()
Example 4
    def runExperiments(cls):
        """
        Set up logging and trigger running experiments
        """
        cls._log = logging.getLogger('LisaTest')

        cls._log.info('Setup tests execution engine...')
        te = TestEnv(test_conf=cls._getTestConf())

        experiments_conf = cls._getExperimentsConf(te)
        test_dir = os.path.join(te.res_dir,
                                experiments_conf['confs'][0]['tag'])
        os.makedirs(test_dir)

        # Setting cpufreq governor to performance
        te.target.cpufreq.set_all_governors('performance')

        # Creating cgroups hierarchy
        cpuset_cnt = te.target.cgroups.controller('cpuset')
        cpu_cnt = te.target.cgroups.controller('cpu')

        max_duration = 0
        for se in cls.root_group.iter_nodes():
            if se.is_task:
                max_duration = max(max_duration, se.duration_s)

        # Freeze userspace tasks
        cls._log.info('Freezing userspace tasks')
        te.target.cgroups.freeze(Executor.critical_tasks[te.target.os])

        cls._log.info('FTrace events collection enabled')
        te.ftrace.start()

        # Run tasks
        cls._log.info('Running the tasks')
        # Run all tasks in background and wait for completion
        for se in cls.root_group.iter_nodes():
            if se.is_task:
                # Run tasks
                se.wload.run(out_dir=test_dir,
                             cpus=se.cpus,
                             cgroup=se.parent.name,
                             background=True)
        sleep(max_duration)

        te.ftrace.stop()

        trace_file = os.path.join(test_dir, 'trace.dat')
        te.ftrace.get_trace(trace_file)
        cls._log.info('Collected FTrace binary trace: %s', trace_file)

        # Un-freeze userspace tasks
        cls._log.info('Un-freezing userspace tasks')
        te.target.cgroups.freeze(thaw=True)

        # Extract trace
        cls.trace = Trace(None, test_dir, te.ftrace.events)
Example 5
    def setUpClass(cls):
        cls.params = {}
        cls.env = TestEnv(test_conf=TEST_CONF)
        cls.trace_file = os.path.join(cls.env.res_dir, "cap_cap.dat")
        cls.populate_params()

        cls.run_workload()

        trace = trappy.FTrace(cls.trace_file)
        cls.sa = SchedMultiAssert(trace,
                                  cls.env.topology,
                                  execnames=cls.params.keys())
Example 6
 def setUpClass(cls):
     cls.params = {}
     cls.task_prefix = "fmig"
     cls.env = TestEnv(test_conf=TEST_CONF)
     cls.trace_file = os.path.join(cls.env.res_dir, "fork_migration.dat")
     cls.log_file = os.path.join(cls.env.res_dir, "fork_migration.json")
     cls.populate_params()
     cls.tasks = cls.params.keys()
     cls.num_tasks = len(cls.tasks)
     local_setup(cls.env)
     cls.run_workload()
     cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w")
Example 7
    def setUpClass(cls):
        cls.params = {}
        cls.env = TestEnv(test_conf=TEST_CONF)
        cls.trace_file = os.path.join(cls.env.res_dir, "cap_cap.dat")
        cls.populate_params()

        cls.run_workload()

        trace = trappy.FTrace(cls.trace_file)
        cls.sa = SchedMultiAssert(trace, cls.env.topology,
                                  execnames=cls.params.keys())
        times = cls.sa.getStartTime()
        cls.wload_start_time = min(t["starttime"] for t in times.itervalues())
Example 8
File: rfc.py Project: raw-bin/lisa
    def setUpTest(cls, tests_config):

        # Initialize globals
        cls.kernel = None
        cls.dtb = None
        cls.governor = None
        cls.cgroup = None

        cls.print_section('Main', 'Experiments configuration')

        # Load test specific configuration
        tests_config = os.path.join('tests/eas', tests_config)
        logging.info('%14s - Loading EAS RFC tests configuration [%s]...',
                     'Main', tests_config)
        json_conf = JsonConf(tests_config)
        cls.conf = json_conf.load()

        # Check for mandatory configurations
        if 'confs' not in cls.conf or not cls.conf['confs']:
            raise ValueError(
                'Configuration error: missing \'confs\' definitions')
        if 'wloads' not in cls.conf or not cls.conf['wloads']:
            raise ValueError(
                'Configuration error: missing \'wloads\' definitions')

        # Setup devlib to access the configured target
        cls.env = TestEnv(test_conf=cls.conf)

        # Compute total number of experiments
        cls.exp_count = cls.conf['iterations'] \
                * len(cls.conf['wloads']) \
                * len(cls.conf['confs'])

        cls.print_section('Main', 'Experiments execution')

        # Run all the configured experiments
        exp_idx = 1
        for tc in cls.conf['confs']:
            # TARGET: configuration
            if not cls.target_configure(tc):
                continue
            for wl_idx in cls.conf['wloads']:
                # TEST: configuration
                wload = cls.wload_init(tc, wl_idx)
                for itr_idx in range(1, cls.conf['iterations'] + 1):
                    # WORKLOAD: execution
                    cls.wload_run(exp_idx, tc, wl_idx, wload, itr_idx)
                    exp_idx += 1

        cls.print_section('Main', 'Experiments post-processing')
Example 9
 def setUp(self):
     self.res_dir = 'test_{}'.format(self.__class__.__name__)
     self.te = TestEnv(
         target_conf={
             'platform': 'host',
             # With no cpufreq (see below), we won't be able to do
             # calibration. Provide dummy.
             'rtapp-calib': {c: 100
                             for c in range(64)}
         },
         test_conf={
             # Don't load cpufreq, it won't work when platform=host
             'exclude_modules': ['cpufreq'],
         },
         force_new=True)
Example 10
 def setUpClass(cls):
     cls.params = {}
     cls.env = TestEnv(test_conf=TEST_CONF)
     cls.task_prefix = "wmig"
     cls.trace_file = os.path.join(cls.env.res_dir, "wake_migration.dat")
     cls.log_file = os.path.join(cls.env.res_dir, "wake_migration.json")
     cls.populate_params()
     cls.tasks = cls.params.keys()
     cls.num_tasks = len(cls.tasks)
     local_setup(cls.env)
     cls.run_workload()
     cls.s_assert = SchedMultiAssert(cls.trace_file,
                                     cls.env.topology,
                                     execnames=cls.tasks)
     cls.offset = cls.get_offset(cls.tasks[0])
     cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w")
Example 11
 def setUpClass(cls):
     cls.params = {}
     cls.task_prefix = "stp"
     cls.env = TestEnv(test_conf=TEST_CONF)
     cls.trace_file = os.path.join(cls.env.res_dir,
                                   "small_task_packing.dat")
     cls.log_file = os.path.join(cls.env.res_dir, "small_task_packing.json")
     cls.num_tasks = len(cls.env.target.bl.bigs + cls.env.target.bl.littles)
     cls.populate_params()
     cls.tasks = cls.params.keys()
     local_setup(cls.env)
     cls.run_workload()
     cls.s_assert = SchedMultiAssert(cls.trace_file,
                                     cls.env.topology,
                                     execnames=cls.tasks)
     cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w")
Example 12
    def runExperiments(cls):
        cls.te = TestEnv(test_conf=cls._getTestConf())
        cls.target = cls.te.target
        cls._log = logging.getLogger('CpuhpTest')

        # Choose a random seed explicitly if not given
        if cls.hp_stress.get('seed') is None:
            random.seed()
            cls.hp_stress['seed'] = random.randint(0, sys.maxint)
        random.seed(cls.hp_stress['seed'])
        cls._log.info('Random sequence of CPU Hotplug generated with: ')
        cls._log.info(cls.hp_stress)
        with open(os.path.join(cls.te.res_dir, 'hotplug_cfg.json'), 'w') as f:
            json.dump(cls.hp_stress, f, sort_keys=True, indent=4)

        # Play with (online) hotpluggable CPUs only
        cls.target.hotplug.online_all()
        cls.hotpluggable_cpus = filter(
                lambda cpu: cls.target.file_exists(cls._cpuhp_path(cpu)),
                cls.target.list_online_cpus())
        if not cls.hotpluggable_cpus:
            raise RuntimeError('Cannot find any hotpluggable CPU online')
        cls._log.info('Hotpluggable CPUs found on target: ')
        cls._log.info(cls.hotpluggable_cpus)

        # Run random hotplug sequence on target
        cls.cpuhp_seq_script = cls._random_cpuhp_script(cls.duration_sec)
        cls.cpuhp_seq_script.push()
        msg = 'Starting hotplug stress for {} seconds'
        cls._log.info(msg.format(cls.duration_sec))
        cls.target_alive = True

        # The script should run on the target for 'cls.duration_sec' seconds.
        # If there is no life sign of the target 1 minute after that, we
        # consider it dead.
        timeout = cls.duration_sec + 60
        try:
            cls.cpuhp_seq_script.run(as_root=True, timeout=timeout)
        except TimeoutError:
            msg = 'Target not responding after {} seconds ...'
            cls._log.info(msg.format(timeout))
            cls.target_alive = False
            return

        cls._log.info('Hotplug stress completed')
        cls.target.hotplug.online_all()
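
The test above picks a random seed when none is given, seeds Python's random module with it, and dumps the whole hp_stress dictionary (including the seed) to hotplug_cfg.json so a failing run can be reproduced. The sketch below shows how that saved file could be fed back into a later run; the reload_hotplug_seed helper and the idea of replaying from the dump are assumptions for illustration, not part of the test itself.

import json
import os
import random

def reload_hotplug_seed(res_dir):
    # Hypothetical helper: re-read the configuration dumped by the test
    # above and re-seed the RNG so the same random hotplug sequence is
    # regenerated on a subsequent run.
    with open(os.path.join(res_dir, 'hotplug_cfg.json')) as f:
        hp_stress = json.load(f)
    random.seed(hp_stress['seed'])
    return hp_stress
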
Example 13
    def __init__(self):
        """
        Set up logging and trigger running experiments
        """
        LisaLogging.setup()
        self._log = logging.getLogger('Benchmark')

        self._log.info('=== CommandLine parsing...')
        self.args = self._parseCommandLine()

        self._log.info('=== TestEnv setup...')
        self.bm_conf = self._getBmConf()
        self.te = TestEnv(self.bm_conf)
        self.target = self.te.target

        self._log.info('=== Initialization...')
        self.wl = self._getWorkload()
        self.out_dir = self.te.res_dir
        try:
            self._preInit()
            self.benchmarkInit()
        except:
            self._log.warning(
                'Benchmark initialization failed: execution aborted')
            raise

        self._log.info('=== Execution...')
        for iter_id in range(1, self.bm_iterations + 1):
            self._log.info('=== Iteration {}/{}...'.format(
                iter_id, self.bm_iterations))
            out_dir = os.path.join(self.out_dir, "{:03d}".format(iter_id))
            try:
                os.makedirs(out_dir)
            except:
                pass

            self._preRun()

            self.wl.run(out_dir=out_dir,
                        collect=self._getBmCollect(),
                        **self.bm_params)

        self._log.info('=== Finalization...')
        self.benchmarkFinalize()
Example 14
 def setUpClass(cls):
     cls._log = logging.getLogger('StHoldTest')
     cls.params = {}
     # connect the target
     cls.env = TestEnv(test_conf=cls._getTestConf())
     cls.trace_file = os.path.join(cls.env.res_dir, '{}_trace.dat'.format(cls.task_name))
     cls.target = cls.env.target
     # check that the SCHEDTUNE_HOLD_ALL feature exists in the target
     cls.check_hold_available()
     # check any per-class requirements
     cls.child_check_config()
     cls.rta_name = 'rt-test-{}'.format(cls.task_name.replace('/',''))
     # ask derived class to populate workload config etc.
     cls.populate_params()
     # ensure required cgroups exist
     cls.setup_cgroups()
     # run workload
     with cls.env.freeze_userspace():
         cls.run_workload()
     # collect and parse the trace
     cls.ftrace_obj = FTrace(cls.trace_file)
Example 15
    def runExperiments(cls):
        """
        Set up logging and trigger running experiments
        """
        cls._log = logging.getLogger('LisaTest')

        cls._log.info('Setup tests execution engine...')
        test_env = TestEnv(test_conf=cls._getTestConf())

        experiments_conf = cls._getExperimentsConf(test_env)

        if ITERATIONS_FROM_CMDLINE:
            if 'iterations' in experiments_conf:
                cls._log.warning("Command line overrides iteration count in "
                                 "{}'s experiments_conf".format(
                                     cls.__name__))
            experiments_conf['iterations'] = ITERATIONS_FROM_CMDLINE

        cls.executor = Executor(test_env, experiments_conf)

        # Alias tests and workloads configurations
        cls.wloads = cls.executor._experiments_conf["wloads"]
        cls.confs = cls.executor._experiments_conf["confs"]

        # Alias executor objects to make less verbose tests code
        cls.te = cls.executor.te
        cls.target = cls.executor.target

        # Execute pre-experiments code defined by the test
        cls._experimentsInit()

        cls._log.info('Experiments execution...')
        cls.executor.run()

        cls.experiments = cls.executor.experiments

        # Execute post-experiments code defined by the test
        cls._experimentsFinalize()
Example 16
from stable_baselines.common.policies import MlpLnLstmPolicy, MlpPolicy
from stable_baselines.common.vec_env import SubprocVecEnv, DummyVecEnv
from stable_baselines import PPO2

from env import TestEnv

if __name__ == '__main__':
    Test_env = SubprocVecEnv([lambda: TestEnv() for i in range(4)])

    model = PPO2(MlpLnLstmPolicy, Test_env, verbose=0, nminibatches=4)
    model.learn(total_timesteps=10000)
Example 17
    # Device
    # By default the connected device is detected automatically, but if more
    # than one device is attached, override the following to select a specific one.
    # "device"       : "HT6880200489",

    # Folder where all the results will be collected
    "results_dir" : "CameraFlashlight",

    # Define devlib modules to load
    "modules"     : [
        'cpufreq',      # enable CPUFreq support
    ],

    "emeter" : {
        'instrument': 'monsoon',
        'conf': { }
    },

    # Tools required by the experiments
    "tools"   : [],
}

if args.serial:
    my_conf["device"] = args.serial

# Initialize a test environment using:
te = TestEnv(my_conf, wipe=False)
target = te.target

results = experiment()
Example 18
 def setUp(self):
     self.env = TestEnv()
     self.tracker = self.env.tracker
     self.issue = Issue(self.tracker)
Example 19
    def __init__(self, target_conf=None, tests_conf=None):
        """
        Tests Executor

        A tests executor is a module which supports the execution of a
        configured set of experiments. Each experiment is composed of:
        - a target configuration
        - a workload to execute

        The executor module can be configured to run a set of workloads
        (wloads) in each different target configuration of a specified set
        (confs). These wloads and confs can be specified by the "tests_conf"
        input dictionary.

        All the results generated by each experiment are collected in a
        results folder which is named according to this template:
            results/<test_id>/<wltype>:<conf>:<wload>/<run_id>
        where:
        - <test_id> : the "tid" defined by the tests_conf, or a timestamp
                      based folder name in case "tid" is not specified
        - <wltype>  : the class of workload executed, e.g. rtapp or sched_perf
        - <conf>    : the identifier of one of the specified configurations
        - <wload>   : the identifier of one of the specified workloads
        - <run_id>  : the progressive execution number from 1 up to the
                      specified number of iterations
        """

        # Initialize globals
        self._default_cgroup = None
        self._cgroup = None

        # Setup test configuration
        if isinstance(tests_conf, dict):
            logging.info('%14s - Loading custom (inline) test configuration',
                    'Target')
            self._tests_conf = tests_conf
        elif isinstance(tests_conf, str):
            logging.info('%14s - Loading custom (file) test configuration',
                    'Target')
            json_conf = JsonConf(tests_conf)
            self._tests_conf = json_conf.load()
        else:
            raise ValueError('tests_conf must be either a dictionary or a filepath')

        # Check for mandatory configurations
        if 'confs' not in self._tests_conf or not self._tests_conf['confs']:
            raise ValueError(
                    'Configuration error: missing \'confs\' definitions')
        if 'wloads' not in self._tests_conf or not self._tests_conf['wloads']:
            raise ValueError(
                    'Configuration error: missing \'wloads\' definitions')

        # Setup devlib to access the configured target
        self.te = TestEnv(target_conf, tests_conf)
        self.target = self.te.target

        # Compute total number of experiments
        self._exp_count = self._tests_conf['iterations'] \
                * len(self._tests_conf['wloads']) \
                * len(self._tests_conf['confs'])

        self._print_section('Executor', 'Experiments configuration')

        logging.info('%14s - Configured to run:', 'Executor')

        logging.info('%14s -   %3d target configurations:',
                     'Executor', len(self._tests_conf['confs']))
        target_confs = [conf['tag'] for conf in self._tests_conf['confs']]
        target_confs = ', '.join(target_confs)
        logging.info('%14s -       %s', 'Executor', target_confs)

        logging.info('%14s -   %3d workloads (%d iterations each)',
                     'Executor', len(self._tests_conf['wloads']),
                     self._tests_conf['iterations'])
        wload_confs = ', '.join(self._tests_conf['wloads'])
        logging.info('%14s -       %s', 'Executor', wload_confs)

        logging.info('%14s - Total: %d experiments',
                     'Executor', self._exp_count)

        logging.info('%14s - Results will be collected under:', 'Executor')
        logging.info('%14s -       %s', 'Executor', self.te.res_dir)
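
The constructor above computes the total number of experiments as the product of the iteration count, the number of workloads and the number of target configurations, and collects results following the results/<test_id>/<wltype>:<conf>:<wload>/<run_id> template from the docstring (for instance, a "tid" of mytest running an rtapp workload named wl_light under the base configuration for its first iteration would land in results/mytest/rtapp:base:wl_light/1). Below is a minimal sketch of that bookkeeping with a hypothetical inline configuration; the workload bodies are empty placeholders, not a real workload specification.

# Hypothetical inline configuration: two target configurations, two
# workloads, three iterations each -> 3 * 2 * 2 = 12 experiments.
tests_conf = {
    "confs": [
        {"tag": "base"},
        {"tag": "eas"},
    ],
    "wloads": {
        "wl_light": {},   # placeholder workload definition
        "wl_heavy": {},   # placeholder workload definition
    },
    "iterations": 3,
}

exp_count = (tests_conf["iterations"]
             * len(tests_conf["wloads"])
             * len(tests_conf["confs"]))
assert exp_count == 12
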
Example 20
 def setUp(self):
     self.env = TestEnv()
     self.tracker = self.env.tracker
Example 21
        # 'cgroups'     # Enable for cgroup support
    ],
    "emeter": {
        'instrument': 'monsoon',
        'conf': {}
    },
    "systrace": {
        'extra_categories': ['binder_driver'],
        "extra_events": ["binder_transaction_alloc_buf"],
    },
    # Tools required by the experiments
    "tools": ['taskset'],
    "skip_nrg_model": True,
}

te = TestEnv(conf, wipe=False)
target = te.target


def run_page_stats(duration, frequency):
    procs = {}
    for i in range(int(duration / frequency)):
        ss = target.execute("cat /d/binder/stats")
        proc_dump = re.split("\nproc ", ss)[1:]

        for proc in proc_dump[1:]:
            lines = proc.split("\n  ")
            proc_id = lines[0]
            page = re.search("pages: (\d+:\d+:\d+)", proc)
            active, lru, free = map(int, page.group(1).split(":"))
            if proc_id not in procs:
Example 22
 def setUp(self):
     self.env = TestEnv()
Example 23
 def setUp(self):
     # a random path to init trackers at
     self.path = get_uuid() 
     self.env = TestEnv(False)
Example 24
 def setUpClass(cls):
     cls.env = TestEnv(test_conf=TEST_CONF)
     cls.target = cls.env.target