def __init__(self):
    """
    Set up logging and trigger running experiments
    """
    LisaLogging.setup()
    self._log = logging.getLogger('Benchmark')

    self._log.info('=== CommandLine parsing...')
    self.args = self._parseCommandLine()

    self._log.info('=== TestEnv setup...')
    self.bm_conf = self._getBmConf()
    self.te = TestEnv(self.bm_conf)
    self.target = self.te.target

    self._log.info('=== Initialization...')
    self.wl = self._getWorkload()
    self.out_dir = self.te.res_dir
    try:
        self._preInit()
        self.benchmarkInit()
    except:
        self._log.warning('Benchmark initialization failed: execution aborted')
        raise

    self._log.info('=== Execution...')
    for iter_id in range(1, self.bm_iterations + 1):
        self._log.info('=== Iteration {}/{}...'.format(
            iter_id, self.bm_iterations))
        out_dir = os.path.join(self.out_dir, "{:03d}".format(iter_id))
        try:
            os.makedirs(out_dir)
        except:
            pass
        self._preRun()
        self.wl.run(out_dir=out_dir,
                    collect=self._getBmCollect(),
                    **self.bm_params)

    self._log.info('=== Finalization...')
    self.benchmarkFinalize()
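# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original sources): the
# constructor above relies on a set of attributes and hook methods being
# available on the instance (bm_params, bm_iterations, _parseCommandLine,
# _getBmConf, _getWorkload, _getBmCollect, _preInit, _preRun, benchmarkInit,
# benchmarkFinalize). Assuming that constructor belongs to a Benchmark base
# class, the subclass below illustrates that contract; in the real code some
# of these hooks may well be provided by the base class itself, and every
# name and value here other than the hook names called by __init__ is
# illustrative only.
# ---------------------------------------------------------------------------
class ExampleBenchmark(Benchmark):

    # Keyword arguments forwarded to self.wl.run() on every iteration
    bm_params = {}
    # Number of '=== Iteration i/N' runs performed by __init__
    bm_iterations = 3

    def _parseCommandLine(self):
        # Would normally return the parsed command line options
        return None

    def _getBmConf(self):
        # Would normally return the TestEnv configuration dictionary
        return {}

    def _getWorkload(self):
        # Would normally return the workload object to run on the target
        raise NotImplementedError('illustrative sketch only')

    def _getBmCollect(self):
        # What the workload should collect while running (e.g. a trace)
        return ''

    def _preInit(self):
        pass

    def _preRun(self):
        pass

    def benchmarkInit(self):
        pass

    def benchmarkFinalize(self):
        pass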
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import logging
from conf import LisaLogging
LisaLogging.setup()

import json
import os
import devlib
from env import TestEnv
from android import Screen, Workload, System
from trace import Trace
import trappy
import pandas as pd
import sqlite3
import argparse
import shutil

parser = argparse.ArgumentParser(description='CameraFlashlight')

parser.add_argument('--out_prefix', dest='out_prefix', action='store',
                    default='default',