Example No. 1
class HwmonInstrument(Instrument):

    name = 'hwmon'
    description = """
    Hardware Monitor (hwmon) is a generic Linux kernel subsystem,
    providing access to hardware monitoring components like temperature or
    voltage/current sensors.

    The following web page has more information:

        http://blogs.arm.com/software-enablement/925-linux-hwmon-power-management-and-arm-ds-5-streamline/

    You can specify which sensors HwmonInstrument looks for by specifying
    hwmon_sensors in your config.py, e.g. ::

        hwmon_sensors = ['energy', 'temp']

    If this setting is not specified, it will look for all sensors it knows about.
    Currently, the valid values are::

        :energy: Collect energy measurements and report energy consumed
                 during run execution (the diff of before and after readings)
                 in Joules.
        :temp: Collect temperature measurements and report the before and
               after readings in degrees Celsius.

    """

    parameters = [
        Parameter('sensors', kind=list_of_strs, default=['energy', 'temp'],
                  global_alias='hwmon_sensors',
                  description='The kinds of sensors the hwmon instrument will look for.')
    ]

    def __init__(self, device, **kwargs):
        super(HwmonInstrument, self).__init__(device, **kwargs)

        if self.sensors:
            self.sensor_kinds = {}
            for kind in self.sensors:
                if kind in HWMON_SENSORS:
                    self.sensor_kinds[kind] = HWMON_SENSORS[kind]
                else:
                    message = 'Unexpected sensor type: {}; must be in {}'.format(kind, HWMON_SENSORS.keys())
                    raise ConfigError(message)
        else:
            self.sensor_kinds = HWMON_SENSORS

        self.sensors = []

    def initialize(self, context):
        self.sensors = []
        self.logger.debug('Searching for HWMON sensors.')
        discovered_sensors = discover_sensors(self.device, self.sensor_kinds.keys())
        for sensor in sorted(discovered_sensors, key=lambda s: HWMON_SENSOR_PRIORITIES.index(s.kind)):
            self.logger.debug('Adding {}'.format(sensor.filepath))
            self.sensors.append(sensor)

    def setup(self, context):
        for sensor in self.sensors:
            sensor.clear_readings()

    def fast_start(self, context):
        for sensor in reversed(self.sensors):
            sensor.take_reading()

    def fast_stop(self, context):
        for sensor in self.sensors:
            sensor.take_reading()

    def update_result(self, context):
        for sensor in self.sensors:
            try:
                report_type, units, conversion = HWMON_SENSORS[sensor.kind]
                if report_type == 'diff':
                    before, after = sensor.readings
                    diff = conversion(after - before)
                    context.result.add_metric(sensor.label, diff, units)
                elif report_type == 'before/after':
                    before, after = sensor.readings
                    context.result.add_metric(sensor.label + ' before', conversion(before), units)
                    context.result.add_metric(sensor.label + ' after', conversion(after), units)
                else:
                    raise InstrumentError('Unexpected report_type: {}'.format(report_type))
            except ValueError as e:
                self.logger.error('Could not collect all {} readings for {}'.format(sensor.kind, sensor.label))
                self.logger.error('Got: {}'.format(e))
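
``update_result`` above unpacks each ``HWMON_SENSORS`` entry as a ``(report_type, units, conversion)`` tuple. Below is a minimal sketch of what that mapping might look like; the kinds match the docstring, but the units and conversion factors are illustrative assumptions, not the project's real values:

# Hypothetical HWMON_SENSORS mapping: kind -> (report_type, units, conversion).
HWMON_SENSORS = {
    'energy': ('diff', 'Joules', lambda raw: raw / 1000000.0),      # microjoules -> Joules (assumed)
    'temp': ('before/after', 'Celsius', lambda raw: raw / 1000.0),  # millidegrees -> Celsius (assumed)
}

report_type, units, convert = HWMON_SENSORS['energy']
before, after = 1200000, 4200000  # two raw readings taken around a run
if report_type == 'diff':
    print('energy consumed: {} {}'.format(convert(after - before), units))  # 3.0 Joules
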
Example No. 2
class ServoPowerMonitor(Instrument):

    name = 'servo_power'
    description = """
    Collects power traces using the Chromium OS Servo Board.

    Servo is a debug board used for Chromium OS test and development. Among other uses, it allows
    access to the built-in power monitors (if present) of a Chrome OS device. More information on
    the Servo board can be found at the link below:

    https://www.chromium.org/chromium-os/servo

    In order to use this instrument you need to be a sudoer and you need a chroot environment. More
    information on the chroot environment can be found at the link below:

    https://www.chromium.org/chromium-os/developer-guide

    If you wish to run servod on a remote machine you will need to allow it to accept external connections
    using the `--host` command line option, like so:
    `sudo servod -b some_board -c some_board.xml --host=''`

    """

    parameters = [
        Parameter('power_domains', kind=list_of_strings, default=[],
                  description="""The names of power domains to be monitored by the
                                 instrument using servod."""),
        Parameter('labels', kind=list_of_strings, default=[],
                  description="""Meaningful labels for each of the monitored domains."""),
        Parameter('chroot_path', kind=str,
                  description="""Path to the chroot directory on the host."""),
        Parameter('sampling_rate', kind=int, default=10,
                  description="""Samples per second."""),
        Parameter('board_name', kind=str, mandatory=True,
                  description="""The name of the board under test."""),
        Parameter('autostart', kind=bool, default=True,
                  description="""Automatically start `servod`. Set to `False` if you want to
                                 use an already running `servod` instance or a remote servo."""),
        Parameter('host', kind=str, default="localhost",
                  description="""When `autostart` is set to `False` you can specify the host
                                 on which `servod` is running, allowing you to remotely access
                                 the servo board.

                                 If `autostart` is `True` this parameter is ignored and `localhost`
                                 is used instead."""),
        Parameter('port', kind=int, default=9999,
                  description="""When `autostart` is set to `False` you must provide the port
                                 that `servod` is running on.

                                 If `autostart` is `True` this parameter is ignored and the port
                                 output during the startup of `servod` will be used."""),
        Parameter('vid', kind=str,
                  description="""When more than one servo is plugged in, you must provide
                                 a vid/pid pair to identify the servo you wish to use."""),
        Parameter('pid', kind=str,
                  description="""When more than one servo is plugged in, you must provide
                                 a vid/pid pair to identify the servo you wish to use."""),
    ]

    # When trying to initialize servod, it may take some time until the server is up.
    # Therefore we need to poll to identify when the server has successfully started.
    # servod_max_tries specifies the maximum number of times we will check to see if the
    # server has started, while servod_delay_between_tries is the sleep time between checks.
    servod_max_tries = 100
    servod_delay_between_tries = 0.1

    def validate(self):
        # pylint: disable=access-member-before-definition
        if self.labels and len(self.power_domains) != len(self.labels):
            raise ConfigError('There should be exactly one label per power domain')
        if self.autostart:
            if self.host != 'localhost':  # pylint: disable=access-member-before-definition
                self.logger.warning('Ignoring host "%s" since autostart is set to "True"', self.host)
                self.host = "localhost"
        if (self.vid is None) != (self.pid is None):
            raise ConfigError('`vid` and `pid` must be specified together')

    def initialize(self, context):
        # pylint: disable=access-member-before-definition
        self.poller = None
        self.data = None
        self.stopped = True

        if self.device.platform != "chromeos":
            raise InstrumentError("servo_power instrument only supports Chrome OS devices.")

        if not self.labels:
            self.labels = ["PORT_{}".format(channel) for channel, _ in enumerate(self.power_domains)]

        self.power_domains = [channel if channel.endswith("_mw") else
                              "{}_mw".format(channel) for channel in self.power_domains]
        self.label_map = {pd: l for pd, l in zip(self.power_domains, self.labels)}

        if self.autostart:
            self._start_servod()

    def setup(self, context):
        # pylint: disable=access-member-before-definition
        self.outfile = os.path.join(context.output_directory, 'servo.csv')
        self.poller = PowerPoller(self.host, self.port, self.power_domains, self.sampling_rate)

    def start(self, context):
        self.poller.start()
        self.stopped = False

    def stop(self, context):
        self.data = self.poller.stop()
        self.poller.join()
        self.stopped = True

        timestamps = self.data.pop("timestamp")
        for channel, data in self.data.iteritems():
            label = self.label_map[channel]
            data = [float(v) / 1000.0 for v in data]
            sample_sum = sum(data)

            metric_name = '{}_power'.format(label)
            power = sample_sum / len(data)
            context.result.add_metric(metric_name, round(power, 3), 'Watts')

            metric_name = '{}_energy'.format(label)
            energy = sample_sum * (1.0 / self.sampling_rate)
            context.result.add_metric(metric_name, round(energy, 3), 'Joules')

        with open(self.outfile, 'wb') as f:
            c = csv.writer(f)
            headings = ['timestamp'] + ['{}_power'.format(label) for label in self.labels]
            c.writerow(headings)
            for row in zip(timestamps, *self.data.itervalues()):
                c.writerow(row)

    def teardown(self, context):
        if not self.stopped:
            self.stop(context)
        if self.autostart:
            self.server_session.kill_session()

    def _start_servod(self):
        in_chroot = which('dut-control') is not None
        password = ''
        if not in_chroot:
            msg = 'Instrument %s requires sudo access on this machine to start `servod`'
            self.logger.info(msg, self.name)
            self.logger.info('You need to be a sudoer to use it.')
            password = getpass.getpass()
            check = subprocess.call('echo {} | sudo -S ls > /dev/null'.format(password), shell=True)
            if check:
                raise InstrumentError('Given password was either wrong or you are not a sudoer')
        self.server_session = CrosSdkSession(self.chroot_path, password=password)
        password = ''

        command = 'sudo servod -b {b} -c {b}.xml'
        if self.vid and self.pid:
            command += " -v " + self.vid
            command += " -p " + self.pid
        command += '&'
        self.server_session.send_command(command.format(b=self.board_name))
        for _ in xrange(self.servod_max_tries):
            server_lines = self.server_session.get_lines(timeout=1, from_stderr=True,
                                                         timeout_only_for_first_line=False)
            if server_lines:
                if 'Listening on' in server_lines[-1]:
                    self.port = int(server_lines[-1].split()[-1])
                    break
            time.sleep(self.servod_delay_between_tries)
        else:
            raise InstrumentError('Failed to start servod in cros_sdk environment')
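
The power and energy figures produced in ``stop()`` above are plain averaging over fixed-rate samples. The same arithmetic in isolation, with made-up milliwatt readings:

# servod samples are assumed to be in milliwatts, taken at a fixed
# sampling_rate (samples per second); the values below are hypothetical.
sampling_rate = 10
samples_mw = [1500.0, 1520.0, 1480.0]
samples_w = [v / 1000.0 for v in samples_mw]     # milliwatts -> Watts

power = sum(samples_w) / len(samples_w)          # mean power, Watts
energy = sum(samples_w) * (1.0 / sampling_rate)  # each sample spans 1/rate seconds -> Joules
print(round(power, 3), round(energy, 3))         # 1.5 0.45
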
Example No. 3
class ApplaunchWorkload(Workload):

    name = 'applaunch'
    description = """
    Measures the time and energy used in launching an application.

    """
    supported_platforms = ['android']

    parameters = [
        Parameter('app',
                  default='browser',
                  allowed_values=['calculator', 'browser', 'calendar'],
                  description='The name of the application to measure.'),
        Parameter(
            'set_launcher_affinity',
            kind=bool,
            default=True,
            description=
            ('If ``True``, this will explicitly set the affinity of the launcher '
             'process to the A15 cluster.')),
        Parameter('times',
                  kind=int,
                  default=8,
                  description='Number of app launches to do on the device.'),
        Parameter('measure_energy',
                  kind=boolean,
                  default=False,
                  description="""
                  Specifies whether energy measurements should be taken during the run.

                  .. note:: This depends on appropriate sensors being exposed through HWMON.

                  """),
        Parameter(
            'cleanup',
            kind=boolean,
            default=True,
            description=
            'Specifies whether to clean up temporary files on the device.'),
    ]

    def __init__(self, device, **kwargs):
        super(ApplaunchWorkload, self).__init__(device, **kwargs)
        if not jinja2:
            raise WorkloadError(
                'Please install jinja2 Python package: "sudo pip install jinja2"'
            )
        filename = '{}-{}.sh'.format(self.name, self.app)
        self.host_script_file = os.path.join(settings.meta_directory, filename)
        self.device_script_file = os.path.join(self.device.working_directory,
                                               filename)
        self._launcher_pid = None
        self._old_launcher_affinity = None
        self.sensors = []

    def on_run_init(self, context):  # pylint: disable=W0613
        if self.measure_energy:
            self.sensors = discover_sensors(self.device, ['energy'])
            for sensor in self.sensors:
                sensor.label = identifier(sensor.label).upper()

    def setup(self, context):
        self.logger.debug('Creating script {}'.format(self.host_script_file))
        with open(self.host_script_file, 'w') as wfh:
            env = jinja2.Environment(loader=jinja2.FileSystemLoader(THIS_DIR))
            template = env.get_template(TEMPLATE_NAME)
            wfh.write(
                template.render(
                    device=self.device,  # pylint: disable=maybe-no-member
                    sensors=self.sensors,
                    iterations=self.times,
                    package=APP_CONFIG[self.app]['package'],
                    activity=APP_CONFIG[self.app]['activity'],
                    options=APP_CONFIG[self.app]['options'],
                ))
        self.device_script_file = self.device.install(self.host_script_file)
        if self.set_launcher_affinity:
            self._set_launcher_affinity()
        self.device.clear_logcat()

    def run(self, context):
        self.device.execute('sh {}'.format(self.device_script_file),
                            timeout=300)

    def update_result(self, context):
        result_files = ['time.result']
        result_files += [
            '{}.result'.format(sensor.label) for sensor in self.sensors
        ]
        for filename in result_files:
            host_result_file = os.path.join(context.output_directory, filename)
            device_result_file = self.device.path.join(
                self.device.working_directory, filename)
            self.device.pull_file(device_result_file, host_result_file)

            with open(host_result_file) as fh:
                if filename == 'time.result':
                    values = [v / 1000.0 for v in map(int, fh.read().split())]
                    _add_metric(context, 'time', values, 'Seconds')
                else:
                    metric = filename.replace('.result', '').lower()
                    numbers = iter(map(int, fh.read().split()))
                    deltas = [(after - before) / 1000000.0
                              for before, after in zip(numbers, numbers)]
                    _add_metric(context, metric, deltas, 'Joules')

    def teardown(self, context):
        if self.set_launcher_affinity:
            self._reset_launcher_affinity()
        if self.cleanup:
            self.device.delete_file(self.device_script_file)

    def _set_launcher_affinity(self):
        try:
            self._launcher_pid = self.device.get_pids_of(
                'com.android.launcher')[0]
            result = self.device.execute('taskset -p {}'.format(
                self._launcher_pid),
                                         busybox=True,
                                         as_root=True)
            self._old_launcher_affinity = int(result.split(':')[1].strip(), 16)

            cpu_ids = [
                i for i, x in enumerate(self.device.core_names) if x == 'a15'
            ]
            if not cpu_ids or len(cpu_ids) == len(self.device.core_names):
                self.logger.debug('Cannot set affinity.')
                return

            # Build a bitmask with bit i set for each A15 CPU id.
            new_mask = reduce(lambda mask, cpu: mask | (1 << cpu), cpu_ids, 0x0)
            self.device.execute('taskset -p 0x{:X} {}'.format(
                new_mask, self._launcher_pid),
                                busybox=True,
                                as_root=True)
        except IndexError:
            raise WorkloadError(
                'Could not set affinity of launcher: PID not found.')

    def _reset_launcher_affinity(self):
        command = 'taskset -p 0x{:X} {}'.format(self._old_launcher_affinity,
                                                self._launcher_pid)
        self.device.execute(command, busybox=True, as_root=True)
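
The affinity mask built in ``_set_launcher_affinity()`` is a bitfield with bit *i* set for CPU *i*. A standalone illustration with a hypothetical core layout (core names assumed):

core_names = ['a7', 'a7', 'a15', 'a15']  # hypothetical big.LITTLE layout
cpu_ids = [i for i, name in enumerate(core_names) if name == 'a15']
# Bits 2 and 3 must be set (0xC); OR-ing the raw indices would give 0x3 instead.
mask = reduce(lambda m, cpu: m | (1 << cpu), cpu_ids, 0x0)
print('taskset -p 0x{:X} <pid>'.format(mask))  # taskset -p 0xC <pid>
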
Example No. 4
class HttpGetter(ResourceGetter):

    name = 'http_assets'
    description = """
    Downloads resources from a server based on an index fetched from the specified URL.

    Given a URL, this will try to fetch ``<URL>/index.json``. The index file maps extension
    names to a list of corresponding asset descriptions. Each asset description contains a path
    (relative to the base URL) of the resource and a SHA256 hash, so that this Getter can
    verify whether the resource on the remote has changed.

    For example, let's assume we want to get the APK file for workload "foo", and that
    assets are hosted at ``http://example.com/assets``. This Getter will first try to
    download ``http://example.com/assets/index.json``. The index file may contain
    something like ::

        {
            "foo": [
                {
                    "path": "foo-app.apk",
                    "sha256": "b14530bb47e04ed655ac5e80e69beaa61c2020450e18638f54384332dffebe86"
                },
                {
                    "path": "subdir/some-other-asset.file",
                    "sha256": "48d9050e9802246d820625717b72f1c2ba431904b8484ca39befd68d1dbedfff"
                }
            ]
        }

    This Getter will look through the list of assets for "foo" (in this case, two) and check
    the paths until it finds one matching the resource (in this case, "foo-app.apk").
    Finally, it will try to download that file relative to the base URL and extension name
    (in this case, "http://example.com/assets/foo/foo-app.apk"). The downloaded version
    will be cached locally, so that in the future, the getter will check the SHA256 hash
    of the local file against the one advertised inside index.json, and provided that hasn't
    changed, it won't try to download the file again.

    """
    priority = GetterPriority.remote
    resource_type = ['apk', 'file', 'jar', 'revent', 'executable']

    parameters = [
        Parameter('url',
                  global_alias='remote_assets_url',
                  description="""Base URL of the assets on an HTTP server. The
                  ``index.json`` index file is fetched relative to this URL."""),
        Parameter(
            'username',
            description="""User name for authenticating with assets URL"""),
        Parameter(
            'password',
            description="""Password for authenticating with assets URL"""),
        Parameter(
            'always_fetch',
            kind=boolean,
            default=False,
            global_alias='always_fetch_remote_assets',
            description=
            """If ``True``, will always attempt to fetch assets from the remote, even if
            a local cached copy is available."""),
        Parameter('chunk_size',
                  kind=int,
                  default=1024,
                  description="""Chunk size for streaming large assets."""),
    ]

    def __init__(self, resolver, **kwargs):
        super(HttpGetter, self).__init__(resolver, **kwargs)
        self.index = None

    def get(self, resource, **kwargs):
        if not resource.owner:
            return  # TODO: add support for unowned resources
        if not self.index:
            self.index = self.fetch_index()
        asset = self.resolve_resource(resource)
        if not asset:
            return
        return self.download_asset(asset, resource.owner.name)

    def fetch_index(self):
        if not self.url:
            return {}
        index_url = urljoin(self.url, 'index.json')
        response = self.geturl(index_url)
        if response.status_code != httplib.OK:
            message = 'Could not fetch "{}"; received "{} {}"'
            self.logger.error(
                message.format(index_url, response.status_code,
                               response.reason))
            return {}
        return json.loads(response.content)

    def download_asset(self, asset, owner_name):
        url = urljoin(self.url, owner_name, asset['path'])
        # _f is a helper that ensures the destination directory exists before writing
        # (an alias for a directory-creating utility in the source).
        local_path = _f(
            os.path.join(settings.dependencies_directory, '__remote',
                         owner_name, asset['path'].replace('/', os.sep)))
        if os.path.exists(local_path) and not self.always_fetch:
            local_sha = sha256(local_path)
            if local_sha == asset['sha256']:
                self.logger.debug('Local SHA256 matches; not re-downloading')
                return local_path
        self.logger.debug('Downloading {}'.format(url))
        response = self.geturl(url, stream=True)
        if response.status_code != httplib.OK:
            message = 'Could not download asset "{}"; received "{} {}"'
            self.logger.warning(
                message.format(url, response.status_code, response.reason))
            return
        with open(local_path, 'wb') as wfh:
            for chunk in response.iter_content(chunk_size=self.chunk_size):
                wfh.write(chunk)
        return local_path

    def geturl(self, url, stream=False):
        if self.username:
            auth = (self.username, self.password)
        else:
            auth = None
        return requests.get(url, auth=auth, stream=stream)

    def resolve_resource(self, resource):
        # pylint: disable=too-many-branches,too-many-locals
        assets = self.index.get(resource.owner.name, {})
        if not assets:
            return {}
        if resource.name in ['apk', 'jar']:
            paths = [a['path'] for a in assets]
            version = getattr(resource, 'version', None)
            found = get_from_list_by_extension(resource, paths, resource.name,
                                               version)
            if found:
                for a in assets:
                    if a['path'] == found:
                        return a
        elif resource.name == 'revent':
            device_model = resource.owner.device.get_device_model()
            wa_device_name = resource.owner.device.name
            for name in [device_model, wa_device_name]:
                if not name:
                    continue
                filename = '.'.join([name, resource.stage, 'revent']).lower()
                for asset in assets:
                    pathname = os.path.basename(asset['path']).lower()
                    if pathname == filename:
                        try:
                            ReventRecording(
                                asset['path']).close()  # Check valid recording
                            return asset
                        except ValueError as e:
                            self.logger.warning(e.message)
        elif resource.name == 'executable':
            platform = resource.platform
            path = '/'.join(['bin', platform, resource.filename])
            for asset in assets:
                if asset['path'].lower() == path.lower():
                    return asset
        else:  # file
            for asset in assets:
                if asset['path'].lower() == resource.path.lower():
                    return asset
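
``download_asset()`` above relies on a ``sha256()`` helper to compare a cached file against the hash advertised in ``index.json``. A minimal sketch of such a helper, assuming it simply hashes the file contents (the real one lives in the project's utility module):

import hashlib

def sha256(path, chunk_size=1024 * 1024):
    """Return the hex SHA256 digest of the file at `path`, read in chunks."""
    digest = hashlib.sha256()
    with open(path, 'rb') as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
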
Example No. 5
class IPythonNotebookExporter(ResultProcessor):

    name = 'ipynb_exporter'
    description = """
    Generates an IPython notebook from a template with the results and runs it.
    Optionally it can show the resulting notebook in a web browser.
    It can also generate a PDF from the notebook.

    The template syntax is that of `jinja2 <http://jinja.pocoo.org/>`_
    and the template should generate a valid ipython notebook. The
    template receives ``result`` and ``context``, which correspond to
    the RunResult and ExecutionContext respectively. You can use those
    in your ipython notebook template to extract any information you
    want to parse or show.

    This results_processor depends on ``ipython`` and ``python-jinja2`` being
    installed on the system.

    For example, a simple template that plots a bar graph of the results is::

    """
    # Note: the example template is appended after the class definition

    parameters = [
        Parameter('notebook_template',
                  default=DEFAULT_NOTEBOOK_TEMPLATE,
                  description='''Filename of the ipython notebook template.  If
                  no `notebook_template` is specified, the example template
                  above is used.'''),
        Parameter('notebook_name_prefix',
                  default='result_',
                  description=''' Prefix of the name of the notebook. The date,
                  time and ``.ipynb`` are appended to form the notebook filename.
                  E.g. if notebook_name_prefix is ``result_`` then a run on 13th
                  April 2015 at 9:54 would generate a notebook called
                  ``result_150413-095400.ipynb``. When generating a PDF,
                  the resulting file will have the same name, but
                  ending in ``.pdf``.'''),
        Parameter(
            'show_notebook',
            kind=bool,
            description='Open a web browser with the resulting notebook.'),
        Parameter('notebook_directory',
                  description='''Path to the notebooks directory served by the
                  ipython notebook server. You must set it if
                  ``show_notebook`` is selected. The ipython notebook
                  will be copied here if specified.'''),
        Parameter('notebook_url',
                  default='http://localhost:8888/notebooks',
                  description='''URL of the notebook on the IPython server. If
                  not specified, it will be assumed to be in the root notebooks
                  location on localhost, served on port 8888. Only needed if
                  ``show_notebook`` is selected.

                  .. note:: the URL should not contain the final part (the notebook name) which will be populated automatically.
                  '''),
        Parameter('convert_to_html',
                  kind=bool,
                  description='Convert the resulting notebook to HTML.'),
        Parameter('show_html',
                  kind=bool,
                  description='''Open the exported html notebook at the end of
                  the run. This can only be selected if convert_to_html has
                  also been selected.'''),
        Parameter('convert_to_pdf',
                  kind=bool,
                  description='Convert the resulting notebook to PDF.'),
        Parameter('show_pdf',
                  kind=bool,
                  description='''Open the pdf at the end of the run. This can
                  only be selected if convert_to_pdf has also been selected.'''
                  ),
    ]

    def initialize(self, context):
        file_resource = File(self, self.notebook_template)
        self.notebook_template_file = context.resolver.get(file_resource)
        nbbasename_template = self.notebook_name_prefix + '%y%m%d-%H%M%S.ipynb'
        self.nbbasename = datetime.now().strftime(nbbasename_template)

    def validate(self):
        if ipython.import_error_str:
            raise ResultProcessorError(ipython.import_error_str)

        if not jinja2:
            msg = '{} requires python-jinja2 package to be installed'.format(
                self.name)
            raise ResultProcessorError(msg)

        if self.show_notebook and not self.notebook_directory:
            raise ConfigError(
                'Requested "show_notebook" but no notebook_directory was specified'
            )

        if self.notebook_directory and not os.path.isdir(
                self.notebook_directory):
            raise ConfigError('notebook_directory {} does not exist'.format(
                self.notebook_directory))

        if self.show_html and not self.convert_to_html:  # pylint: disable=E0203
            self.convert_to_html = True
            self.logger.debug(
                'Assuming "convert_to_html" as "show_html" is set')

        if self.show_pdf and not self.convert_to_pdf:  # pylint: disable=E0203
            self.convert_to_pdf = True
            self.logger.debug('Assuming "convert_to_pdf" as "show_pdf" is set')

    def export_run_result(self, result, context):
        self.generate_notebook(result, context)
        if self.show_notebook:
            self.open_notebook()

        if self.convert_to_pdf:
            ipython.export_notebook(self.nbbasename,
                                    context.run_output_directory, 'pdf')
            if self.show_pdf:
                self.open_file('pdf')

        if self.convert_to_html:
            ipython.export_notebook(self.nbbasename,
                                    context.run_output_directory, 'html')
            if self.show_html:
                self.open_file('html')

    def generate_notebook(self, result, context):
        """Generate a notebook from the template and run it"""
        with open(self.notebook_template_file) as fin:
            template = jinja2.Template(fin.read())

        notebook_in = template.render(result=result, context=context)
        notebook = ipython.read_notebook(notebook_in)

        ipython.run_notebook(notebook)

        self.notebook_file = os.path.join(context.run_output_directory,
                                          self.nbbasename)
        with open(self.notebook_file, 'w') as wfh:
            ipython.write_notebook(notebook, wfh)

        if self.notebook_directory:
            shutil.copy(self.notebook_file,
                        os.path.join(self.notebook_directory))

    def open_notebook(self):
        """Open the notebook in a browser"""
        webbrowser.open(self.notebook_url.rstrip('/') + '/' + self.nbbasename)

    def open_file(self, output_format):
        """Open the exported notebook"""
        fname = os.path.splitext(self.notebook_file)[0] + "." + output_format
        open_file(fname)
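
``generate_notebook()`` above is a plain jinja2 render followed by executing and writing the notebook. The render step in isolation, using a toy template string and stand-in result objects (all names below are illustrative assumptions, not the real RunResult API):

import jinja2

class Metric(object):  # stand-in for a result metric
    def __init__(self, name, value, units):
        self.name, self.value, self.units = name, value, units

class Result(object):  # stand-in for the RunResult passed to the template
    metrics = [Metric('score', 42, 'points')]

template = jinja2.Template(
    '{% for m in result.metrics %}{{ m.name }}: {{ m.value }} {{ m.units }}{% endfor %}')
print(template.render(result=Result(), context=None))  # score: 42 points
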
Example No. 6
class Andebench(AndroidUiAutoBenchmark):

    name = 'andebench'
    description = """
    AndEBench is an industry standard Android benchmark provided by The
    Embedded Microprocessor Benchmark Consortium (EEMBC).

    http://www.eembc.org/andebench/about.php

    From the website:

       - Initial focus on CPU and Dalvik interpreter performance
       - Internal algorithms concentrate on integer operations
       - Compares the difference between native and Java performance
       - Implements flexible multicore performance analysis
       - Results displayed in Iterations per second
       - Detailed log file for comprehensive engineering analysis

    """
    package = 'com.eembc.coremark'
    activity = 'com.eembc.coremark.splash'
    summary_metrics = ['AndEMark Java', 'AndEMark Native']

    parameters = [
        Parameter('number_of_threads', kind=int,
                  description='Number of threads that will be spawned by AndEBench.'),
        Parameter('single_threaded', kind=bool,
                  description="""
                  If ``true``, AndEBench will run with a single thread. Note: this must
                  not be specified if ``number_of_threads`` has been specified.
                  """),
        Parameter('native_only', kind=bool,
                  description="""
                  If ``true``, AndEBench will execute only the native portion of the benchmark.
                  """),
    ]

    aliases = [
        Alias('andebenchst', number_of_threads=1),
    ]

    regex = re.compile(r'\s*(?P<key>(AndEMark Native|AndEMark Java))\s*:'
                       r'\s*(?P<value>\d+)')

    def validate(self):
        if (self.number_of_threads is not None) and (self.single_threaded is not None):  # pylint: disable=E1101
            raise ConfigError('Can\'t specify both number_of_threads and single_threaded parameters.')

    def setup(self, context):
        if self.number_of_threads is None:  # pylint: disable=access-member-before-definition
            if self.single_threaded:  # pylint: disable=E1101
                self.number_of_threads = 1  # pylint: disable=attribute-defined-outside-init
            else:
                self.number_of_threads = self.device.number_of_cores  # pylint: disable=W0201
        self.logger.debug('Using {} threads'.format(self.number_of_threads))
        self.uiauto_params['number_of_threads'] = self.number_of_threads
        self.uiauto_params['native_only'] = False
        if self.native_only:
            self.uiauto_params['native_only'] = True
        # The superclass setup is called last, after uiauto_params have been modified.
        super(Andebench, self).setup(context)

    def update_result(self, context):
        super(Andebench, self).update_result(context)
        results = {}
        with open(self.logcat_log) as fh:
            for line in fh:
                match = self.regex.search(line)
                if match:
                    data = match.groupdict()
                    results[data['key']] = data['value']
        for key, value in results.iteritems():
            context.result.add_metric(key, value)
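
The metrics are scraped from logcat with the compiled regex above; a quick check against a hypothetical logcat line shows what ``groupdict()`` yields:

import re

regex = re.compile(r'\s*(?P<key>(AndEMark Native|AndEMark Java))\s*:\s*(?P<value>\d+)')
match = regex.search('I/coremark(1234): AndEMark Native: 12345')  # hypothetical line
print(match.groupdict())  # {'key': 'AndEMark Native', 'value': '12345'}
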
Example No. 7
class ChromeAutotest(Workload):

    name = 'autotest'
    description = '''
    Executes tests from the ChromeOS autotest suite.

    .. note:: This workload *must* be run inside a ChromeOS SDK chroot.

    See: https://www.chromium.org/chromium-os/testing/power-testing

    '''
    supported_platforms = ['chromeos']

    parameters = [
        Parameter('test',
                  mandatory=True,
                  description='''
                  The test to be run
                  '''),
        Parameter('test_that_args',
                  kind=arguments,
                  default='',
                  description='''
                  Extra arguments to be passed to the ``test_that`` invocation.
                  '''),
        Parameter('run_timeout',
                  kind=int,
                  default=30 * 60,
                  description='''
                  Timeout, in seconds, for the test execution.
                  '''),
    ]

    def setup(self, context):
        if self.device.platform != 'chromeos':
            raise WorkloadError('{} only supports ChromeOS devices'.format(
                self.name))
        self.test_that = which('test_that')
        if not self.test_that:
            message = (
                'Could not find "test_that"; {} must be running in a ChromeOS SDK chroot '
                '(did you execute "cros_sdk"?)')
            raise WorkloadError(message.format(self.name))
        self.command = self._build_command()
        self.raw_output = None
        # make sure no other test is running
        self.device.execute('killall -9 autotest', check_exit_code=False)

    def run(self, context):
        self.logger.debug(self.command)
        self.raw_output, _ = check_output(self.command,
                                          timeout=self.run_timeout,
                                          shell=True)

    def update_result(self, context):
        if not self.raw_output:
            self.logger.warning(
                'No power_LoadTest output detected; run failed?')
            return
        raw_outfile = os.path.join(context.output_directory,
                                   'autotest-output.raw')
        with open(raw_outfile, 'w') as wfh:
            wfh.write(self.raw_output)
        context.add_artifact('autotest_raw', raw_outfile, kind='raw')
        lines = iter(self.raw_output.split('\n'))
        # Results are delimited from the rest of the output by MARKER
        for line in lines:
            if MARKER in line:
                break
        for line in lines:
            match = STATUS_REGEX.search(line)
            if match:
                status = match.group(1)
                if status != 'PASSED':
                    self.logger.warning(line)
            match = METRIC_REGEX.search(line)
            if match:
                try:
                    context.result.add_metric(match.group(1),
                                              numeric(match.group(2)),
                                              lower_is_better=True)
                except ValueError:
                    pass  # non-numeric metrics aren't supported

    def _build_command(self):
        parts = [self.test_that, self.device.host, self.test]
        parts.append(str(self.test_that_args))
        return ' '.join(parts)
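
``update_result()`` above passes a single iterator through two ``for`` loops: the first consumes everything up to and including the MARKER delimiter, the second parses only what follows. The pattern in isolation, with a hypothetical marker and output:

MARKER = '---RESULTS---'  # hypothetical; the real delimiter is defined elsewhere
raw_output = 'setup noise\nmore noise\n---RESULTS---\nmetric_a 1\nmetric_b 2'

lines = iter(raw_output.split('\n'))
for line in lines:   # consumes lines up to and including the marker
    if MARKER in line:
        break
for line in lines:   # the same iterator resumes right after the marker
    print(line)      # metric_a 1, then metric_b 2
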
Example No. 8
class SkypeVideo(UiAutomatorWorkload):

    name = 'skypevideo'
    description = """
    Initiates a Skype video call to a specified contact for a pre-determined duration.
    (Note: this requires Skype to be set up appropriately.)

    This workload is intended for monitoring the behaviour of a device while a Skype
    video call is in progress (a common use case). It does not produce any score or
    metric; the intention is that some additional instrumentation is enabled while
    running this workload.

    This workload, obviously, requires a network connection (ideally, wifi).

    This workload accepts the following parameters:


    **Skype Setup**

       - You should install Skype client from Google Play Store on the device
         (this was tested with client version 4.5.0.39600; other recent versions
         should also work).
       - You must have an account set up and logged into Skype on the device.
       - The contact to be called must have been added to the account (and must
         have accepted the contact request). It's possible to have multiple
         contacts in the list; however, the contact to be called *must* be
         visible on initial navigation to the list.
       - The contact must be able to receive the call. This means that there
         must be a Skype client running (somewhere) with the contact logged in
         and that client must have been configured to auto-accept calls from the
         account on the device (how to set this varies between different versions
         of Skype and between platforms -- please search online for specific
         instructions).
         https://support.skype.com/en/faq/FA3751/can-i-automatically-answer-all-my-calls-with-video-in-skype-for-windows-desktop

    """

    package = 'com.skype.raider'

    parameters = [
        Parameter('duration',
                  kind=int,
                  default=300,
                  description='Duration of the video call in seconds.'),
        Parameter('contact',
                  mandatory=True,
                  description="""
                  The name of the Skype contact to call. The contact must already be
                  added (see below). *If use_gui is set*, then this must be the Skype
                  ID of the contact; *otherwise*, this must be the name of the
                  contact as it appears in the Skype client's contacts list. In the latter
                  case it *must not* contain underscore characters (``_``); it may, however,
                  contain spaces. There is no default; you **must specify the name of the contact**.

                  .. note:: You may alternatively specify the contact name as
                            ``skype_contact`` setting in your ``config.py``. If this is
                            specified, the ``contact`` parameter is optional, though
                            it may still be specified (in which case it will override
                            ``skype_contact`` setting).
                  """),
        Parameter('use_gui',
                  kind=boolean,
                  default=False,
                  description="""
                  Specifies whether the call should be placed directly through a
                  Skype URI, or by navigating the GUI. The URI is the recommended way
                  to place Skype calls on a device, but that does not seem to work
                  correctly on some devices (the URI seems to just start Skype, but not
                  place the call), so an alternative exists that will start the Skype app
                  and will then navigate the UI to place the call (incidentally, this method
                  does not seem to work on all devices either, as sometimes Skype starts
                  backgrounded...). Please note that the meaning of the ``contact`` parameter
                  differs depending on whether this is set. Defaults to ``False``.

                  .. note:: You may alternatively specify this as ``skype_use_gui`` setting
                            in your ``config.py``.
                  """),
    ]

    def __init__(self, device, **kwargs):
        super(SkypeVideo, self).__init__(device, **kwargs)
        if self.use_gui:
            self.uiauto_params['name'] = self.contact.replace(' ', '_')
            self.uiauto_params['duration'] = self.duration
        self.run_timeout = self.duration + 30

    def setup(self, context):
        if self.use_gui:
            super(SkypeVideo, self).setup(context)
            self.device.execute('am force-stop {}'.format(self.package))
            self.device.execute(
                'am start -W -a android.intent.action.VIEW -d skype:')
        else:
            self.device.execute('am force-stop {}'.format(self.package))

    def run(self, context):
        if self.use_gui:
            super(SkypeVideo, self).run(context)
        else:
            command = "am start -W -a android.intent.action.VIEW -d \"skype:{}?call&video=true\""
            self.logger.debug(self.device.execute(command.format(
                self.contact)))
            self.logger.debug('Call started; waiting for {} seconds...'.format(
                self.duration))
            time.sleep(self.duration)
            self.device.execute('am force-stop com.skype.raider')

    def update_result(self, context):
        pass

    def teardown(self, context):
        if self.use_gui:
            super(SkypeVideo, self).teardown(context)
        self.device.execute('am force-stop {}'.format(self.package))
Example No. 9
class EnergyModelInstrument(Instrument):

    name = 'energy_model'
    description = """
    Generates a power model for the device based on the specified workload.

    This instrument will execute the workload specified by the agenda (currently, only ``sysbench`` is
    supported) and will use the resulting performance and power measurements to generate a power model for
    the device.

    This instrument requires certain features to be present in the kernel:

    1. cgroups and cpusets must be enabled.
    2. cpufreq and userspace governor must be enabled.
    3. cpuidle must be enabled.

    """

    parameters = [
        Parameter('device_name', kind=caseless_string,
                  description="""The name of the device to be used in generating the model. If not specified,
                                 ``device.name`` will be used. """),
        Parameter('big_core', kind=caseless_string,
                  description="""The name of the "big" core in the big.LITTLE system; must match
                                 one of the values in ``device.core_names``. """),
        Parameter('performance_metric', kind=caseless_string, mandatory=True,
                  description="""Metric to be used as the performance indicator."""),
        Parameter('power_metric', kind=list_or_caseless_string,
                  description="""Metric to be used as the power indicator. The value may contain a
                                 ``{core}`` format specifier that will be replaced with names of big
                                 and little cores to drive the name of the metric for that cluster.
                                 Either this or ``energy_metric`` must be specified, but not both."""),
        Parameter('energy_metric', kind=list_or_caseless_string,
                  description="""Metric to be used as the energy indicator. The value may contain a
                                 ``{core}`` format specifier that will be replaced with names of big
                                 and little cores to drive the name of the metric for that cluster.
                                 This metric will be used to derive power by dividing it by
                                 execution time. Either this or ``power_metric`` must be specified, but
                                 not both."""),
        Parameter('power_scaling_factor', kind=float, default=1.0,
                  description="""The power model specifies power in milliwatts. This is a scaling factor that
                                 power_metric values will be multiplied by to get milliwatts."""),
        Parameter('big_frequencies', kind=list_of_ints,
                  description="""List of frequencies to be used for big cores. These frequencies must
                                 be supported by the cores. If this is not specified, all available
                                 frequencies for the core (as read from cpufreq) will be used."""),
        Parameter('little_frequencies', kind=list_of_ints,
                  description="""List of frequencies to be used for little cores. These frequencies must
                                 be supported by the cores. If this is not specified, all available
                                 frequencies for the core (as read from cpufreq) will be used."""),
        Parameter('idle_workload', kind=str, default='idle',
                  description="Workload to be used while measuring idle power."),
        Parameter('idle_workload_params', kind=dict, default={},
                  description="Parameters to pass to the idle workload."),
        Parameter('first_cluster_idle_state', kind=int, default=-1,
                  description='''The index of the first cluster idle state on the device. Earlier states
                                 are assumed to be per-core idle states. The default is ``-1``, i.e. only the
                                 last idle state is assumed to affect the entire cluster.'''),
        Parameter('no_hotplug', kind=bool, default=False,
                  description='''This option allows running the instrument without hotplugging cores on and off.
                                 Disabling hotplugging will most likely produce a less accurate power model.'''),
    ]

    def validate(self):
        if import_error:
            message = 'energy_model instrument requires pandas, jinja2 and matplotlib Python packages to be installed; got: "{}"'
            raise InstrumentError(message.format(import_error.message))
        for capability in ['cgroups', 'cpuidle']:
            if not self.device.has(capability):
                message = 'The Device does not appear to support {}; does it have the right module installed?'
                raise ConfigError(message.format(capability))
        device_cores = set(self.device.core_names)
        if (self.power_metric and self.energy_metric) or not (self.power_metric or self.energy_metric):
            raise ConfigError('Either power_metric or energy_metric must be specified (but not both).')
        if not device_cores:
            raise ConfigError('The Device does not appear to have core_names configured.')
        elif len(device_cores) != 2:
            raise ConfigError('The Device does not appear to be a big.LITTLE device.')
        if self.big_core and self.big_core not in self.device.core_names:
            raise ConfigError('Specified big_core "{}" is not among the cores of device {}'.format(self.big_core, self.device.name))
        if not self.big_core:
            self.big_core = self.device.core_names[-1]  # the last core is usually "big" in existing big.LITTLE devices
        if not self.device_name:
            self.device_name = self.device.name

    def initialize(self, context):
        self.number_of_cpus = {}
        self.report_template_file = context.resolver.get(File(self, REPORT_TEMPLATE_FILE))
        self.em_template_file = context.resolver.get(File(self, EM_TEMPLATE_FILE))
        self.little_core = (set(self.device.core_names) - set([self.big_core])).pop()
        self.perform_runtime_validation()
        self.enable_all_cores()
        self.configure_clusters()
        self.discover_idle_states()
        self.disable_thermal_management()
        self.initialize_job_queue(context)
        self.initialize_result_tracking()

    def setup(self, context):
        if not context.spec.label.startswith('idle_'):
            return
        for idle_state in self.get_device_idle_states(self.measured_cluster):
            if idle_state.id == context.spec.idle_state_id:
                idle_state.disable = 0
            else:
                idle_state.disable = 1

    def fast_start(self, context):  # pylint: disable=unused-argument
        self.start_time = time.time()

    def fast_stop(self, context):  # pylint: disable=unused-argument
        self.run_time = time.time() - self.start_time

    def on_iteration_start(self, context):
        self.setup_measurement(context.spec.cluster)

    # slow to make sure power results have been generated
    def slow_update_result(self, context):
        spec = context.result.spec
        cluster = spec.cluster
        is_freq_iteration = spec.label.startswith('freq_')
        perf_metric = 0
        power_metric = 0
        for metric in context.result.metrics:
            if metric.name == self.performance_metric:
                perf_metric = metric.value
            elif (cluster == 'big') and metric.name in self.big_power_metrics:
                power_metric += metric.value * self.power_scaling_factor
            elif (cluster == 'little') and metric.name in self.little_power_metrics:
                power_metric += metric.value * self.power_scaling_factor
            elif (cluster == 'big') and metric.name in self.big_energy_metrics:
                power_metric += metric.value / self.run_time * self.power_scaling_factor
            elif (cluster == 'little') and metric.name in self.little_energy_metrics:
                power_metric += metric.value / self.run_time * self.power_scaling_factor

        if not (power_metric and (perf_metric or not is_freq_iteration)):
            message = 'Incomplete results for {} iteration{}'
            raise InstrumentError(message.format(context.result.spec.id, context.current_iteration))

        if is_freq_iteration:
            index_matter = [cluster, spec.num_cpus,
                            spec.frequency, context.result.iteration]
            data = self.freq_data
        else:
            index_matter = [cluster, spec.num_cpus,
                            spec.idle_state_id, spec.idle_state_desc, context.result.iteration]
            data = self.idle_data
            if self.no_hotplug:
                # Because hotplugging was disabled, power has to be artificially scaled
                # to the number of cores that should have been active if hotplugging had occurred.
                power_metric = spec.num_cpus * (power_metric / self.number_of_cpus[cluster])

        data.append(index_matter + ['performance', perf_metric])
        data.append(index_matter + ['power', power_metric])

    def before_overall_results_processing(self, context):
        # pylint: disable=too-many-locals
        if not self.idle_data or not self.freq_data:
            self.logger.warning('Run aborted early; not generating energy_model.')
            return
        output_directory = os.path.join(context.output_directory, 'energy_model')
        os.makedirs(output_directory)

        df = pd.DataFrame(self.idle_data, columns=['cluster', 'cpus', 'state_id',
                                                   'state', 'iteration', 'metric', 'value'])
        idle_power_table = wa_result_to_power_perf_table(df, '', index=['cluster', 'cpus', 'state'])
        idle_output = os.path.join(output_directory, IDLE_TABLE_FILE)
        with open(idle_output, 'w') as wfh:
            idle_power_table.to_csv(wfh, index=False)
        context.add_artifact('idle_power_table', idle_output, 'export')

        df = pd.DataFrame(self.freq_data,
                          columns=['cluster', 'cpus', 'frequency', 'iteration', 'metric', 'value'])
        freq_power_table = wa_result_to_power_perf_table(df, self.performance_metric,
                                                         index=['cluster', 'cpus', 'frequency'])
        freq_output = os.path.join(output_directory, FREQ_TABLE_FILE)
        with open(freq_output, 'w') as wfh:
            freq_power_table.to_csv(wfh, index=False)
        context.add_artifact('freq_power_table', freq_output, 'export')

        cpus_table = get_cpus_power_table(freq_power_table, 'frequency')
        cpus_output = os.path.join(output_directory, CPUS_TABLE_FILE)
        with open(cpus_output, 'w') as wfh:
            cpus_table.to_csv(wfh)
        context.add_artifact('cpus_table', cpus_output, 'export')

        em = build_energy_model(freq_power_table, cpus_table, idle_power_table, self.first_cluster_idle_state)
        em_file = os.path.join(output_directory, '{}_em.c'.format(self.device_name))
        em_text = generate_em_c_file(em, self.big_core, self.little_core,
                                     self.em_template_file, em_file)
        context.add_artifact('em', em_file, 'data')

        report_file = os.path.join(output_directory, 'report.html')
        generate_report(freq_power_table, cpus_table, idle_power_table,
                        self.report_template_file, self.device_name, em_text,
                        report_file)
        context.add_artifact('pm_report', report_file, 'export')

    def initialize_result_tracking(self):
        self.freq_data = []
        self.idle_data = []
        self.big_power_metrics = []
        self.little_power_metrics = []
        self.big_energy_metrics = []
        self.little_energy_metrics = []
        if self.power_metric:
            self.big_power_metrics = [pm.format(core=self.big_core) for pm in self.power_metric]
            self.little_power_metrics = [pm.format(core=self.little_core) for pm in self.power_metric]
        else:  # must be energy_metric
            self.big_energy_metrics = [em.format(core=self.big_core) for em in self.energy_metric]
            self.little_energy_metrics = [em.format(core=self.little_core) for em in self.energy_metric]

    def configure_clusters(self):
        self.measured_cores = None
        self.measuring_cores = None
        self.cpuset = self.device.get_cgroup_controller('cpuset')
        self.cpuset.create_group('big', self.big_cpus, [0])
        self.cpuset.create_group('little', self.little_cpus, [0])
        for cluster in set(self.device.core_clusters):
            self.device.set_cluster_governor(cluster, 'userspace')

    def discover_idle_states(self):
        online_cpu = self.device.get_online_cpus(self.big_core)[0]
        self.big_idle_states = self.device.get_cpuidle_states(online_cpu)
        online_cpu = self.device.get_online_cpus(self.little_core)[0]
        self.little_idle_states = self.device.get_cpuidle_states(online_cpu)
        if not (len(self.big_idle_states) >= 2 and len(self.little_idle_states) >= 2):
            raise DeviceError('There do not appear to be at least two idle states '
                              'on at least one of the clusters.')

    def setup_measurement(self, measured):
        measuring = 'big' if measured == 'little' else 'little'
        self.measured_cluster = measured
        self.measuring_cluster = measuring
        self.measured_cpus = self.big_cpus if measured == 'big' else self.little_cpus
        self.measuring_cpus = self.little_cpus if measured == 'big' else self.big_cpus
        self.reset()

    def reset(self):
        self.enable_all_cores()
        self.enable_all_idle_states()
        self.reset_cgroups()
        self.cpuset.move_all_tasks_to(self.measuring_cluster)
        server_process = 'adbd' if self.device.platform == 'android' else 'sshd'
        server_pids = self.device.get_pids_of(server_process)
        children_ps = [e for e in self.device.ps()
                       if e.ppid in server_pids and e.name != 'sshd']
        children_pids = [e.pid for e in children_ps]
        pids_to_move = server_pids + children_pids
        self.cpuset.root.add_tasks(pids_to_move)
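        # Pin the server processes to the measuring cluster; list_to_mask
        # converts the CPU id list into the affinity bitmask that taskset
        # expects (see the sketch after this example).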
        for pid in pids_to_move:
            self.device.execute('busybox taskset -p 0x{:x} {}'.format(list_to_mask(self.measuring_cpus), pid))

    def enable_all_cores(self):
        counter = Counter(self.device.core_names)
        for core, number in counter.iteritems():
            self.device.set_number_of_online_cpus(core, number)
        self.big_cpus = self.device.get_online_cpus(self.big_core)
        self.little_cpus = self.device.get_online_cpus(self.little_core)

    def enable_all_idle_states(self):
        for state in self.big_idle_states:
            state.disable = 0
        for state in self.little_idle_states:
            state.disable = 0

    def reset_cgroups(self):
        self.big_cpus = self.device.get_online_cpus(self.big_core)
        self.little_cpus = self.device.get_online_cpus(self.little_core)
        self.cpuset.big.set(self.big_cpus, 0)
        self.cpuset.little.set(self.little_cpus, 0)

    def perform_runtime_validation(self):
        if not self.device.is_rooted:
            raise InstrumentError('the device must be rooted to generate energy models')
        if 'userspace' not in self.device.list_available_cluster_governors(0):
            raise InstrumentError('userspace cpufreq governor must be enabled')

        error_message = 'Frequency {} is not supported by {} cores'
        available_frequencies = self.device.list_available_core_frequencies(self.big_core)
        if self.big_frequencies:
            for freq in self.big_frequencies:
                if freq not in available_frequencies:
                    raise ConfigError(error_message.format(freq, self.big_core))
        else:
            self.big_frequencies = available_frequencies
        available_frequencies = self.device.list_available_core_frequencies(self.little_core)
        if self.little_frequencies:
            for freq in self.little_frequencies:
                if freq not in available_frequencies:
                    raise ConfigError(error_message.format(freq, self.little_core))
        else:
            self.little_frequencies = available_frequencies

    def initialize_job_queue(self, context):
        old_specs = []
        for job in context.runner.job_queue:
            if job.spec not in old_specs:
                old_specs.append(job.spec)
        new_specs = self.get_cluster_specs(old_specs, 'big', context)
        new_specs.extend(self.get_cluster_specs(old_specs, 'little', context))

        # Update config to reflect jobs that will actually run.
        context.config.workload_specs = new_specs
        config_file = os.path.join(context.host_working_directory, 'run_config.json')
        with open(config_file, 'wb') as wfh:
            context.config.serialize(wfh)

        context.runner.init_queue(new_specs)

    def get_cluster_specs(self, old_specs, cluster, context):
        core = self.get_core_name(cluster)
        self.number_of_cpus[cluster] = sum([1 for c in self.device.core_names if c == core])

        cluster_frequencies = self.get_frequencies_param(cluster)
        if not cluster_frequencies:
            raise InstrumentError('Could not read available frequencies for {}'.format(core))

        idle_states = self.get_device_idle_states(cluster)
        new_specs = []
        for state in idle_states:
            for num_cpus in xrange(1, self.number_of_cpus[cluster] + 1):
                spec = old_specs[0].copy()
                spec.workload_name = self.idle_workload
                spec.workload_parameters = self.idle_workload_params
                spec.idle_state_id = state.id
                spec.idle_state_desc = state.desc
                if not self.no_hotplug:
                    spec.runtime_parameters['{}_cores'.format(core)] = num_cpus
                spec.cluster = cluster
                spec.num_cpus = num_cpus
                spec.id = '{}_idle_{}_{}'.format(cluster, state.id, num_cpus)
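                # e.g. 'big_idle_C1_2' -- cluster 'big', idle state 'C1',
                # two CPUs online (the state id shown is illustrative)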
                spec.label = 'idle_{}'.format(cluster)
                spec.number_of_iterations = old_specs[0].number_of_iterations
                spec.load(self.device, context.config.ext_loader)
                spec.workload.init_resources(context)
                spec.workload.validate()
                new_specs.append(spec)
        for old_spec in old_specs:
            if old_spec.workload_name not in ['sysbench', 'dhrystone']:
                raise ConfigError('Only sysbench and dhrystone workloads are currently supported for energy_model generation.')
            for freq in cluster_frequencies:
                for num_cpus in xrange(1, self.number_of_cpus[cluster] + 1):
                    spec = old_spec.copy()
                    spec.runtime_parameters['{}_frequency'.format(core)] = freq
                    if not self.no_hotplug:
                        spec.runtime_parameters['{}_cores'.format(core)] = num_cpus
                    spec.id = '{}_{}_{}'.format(cluster, num_cpus, freq)
                    spec.label = 'freq_{}_{}'.format(cluster, spec.label)
                    spec.workload_parameters['taskset_mask'] = list_to_mask(self.get_cpus(cluster))
                    spec.workload_parameters['threads'] = num_cpus
                    if old_spec.workload_name == 'sysbench':
                        # max_requests is set to an arbitrarily high value to
                        # make sure sysbench runs for the full duration even on
                        # highly performant cores.
                        spec.workload_parameters['max_requests'] = 10000000
                    spec.cluster = cluster
                    spec.num_cpus = num_cpus
                    spec.frequency = freq
                    spec.load(self.device, context.config.ext_loader)
                    spec.workload.init_resources(context)
                    spec.workload.validate()
                    new_specs.append(spec)
        return new_specs

    def disable_thermal_management(self):
        if self.device.file_exists('/sys/class/thermal/thermal_zone0'):
            tzone_paths = self.device.execute('ls /sys/class/thermal/thermal_zone*')
            for tzpath in tzone_paths.strip().split():
                mode_file = '{}/mode'.format(tzpath)
                if self.device.file_exists(mode_file):
                    self.device.set_sysfile_value(mode_file, 'disabled')

    def get_device_idle_states(self, cluster):
        if cluster == 'big':
            return self.big_idle_states
        else:
            return self.little_idle_states

    def get_core_name(self, cluster):
        if cluster == 'big':
            return self.big_core
        else:
            return self.little_core

    def get_cpus(self, cluster):
        if cluster == 'big':
            return self.big_cpus
        else:
            return self.little_cpus

    def get_frequencies_param(self, cluster):
        if cluster == 'big':
            return self.big_frequencies
        else:
            return self.little_frequencies
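
``list_to_mask`` is used above (in ``reset`` and ``get_cluster_specs``) to build the hex CPU affinity mask passed to ``busybox taskset``, but its definition is not shown here. A minimal sketch of the assumed behaviour, converting a list of CPU ids into an integer bitmask:

    def list_to_mask(cpus):
        # e.g. [0, 1, 4] -> 0b10011 == 19, rendered as 0x13 by the
        # '0x{:x}' format string in the caller
        mask = 0
        for cpu in cpus:
            mask |= 1 << cpu
        return mask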
Example No. 10
class Iozone(Workload):
    name = 'iozone'
    description = """
    Iozone is a filesystem benchmark that runs a series of disk
    I/O performance tests.

    Here is a list of tests that you can run in the iozone
    workload. The descriptions are from the official iozone
    document.

    0  - Write Test
         Measure performance of writing a new file. Other
         tests rely on the file written by this, so it must
         always be enabled (WA will automatically enable this
         if not specified).

    1  - Rewrite Test
         Measure performance of writing an existing file.

    2  - Read Test
         Measure performance of reading an existing file.

    3  - Reread Test
         Measure performance of rereading an existing file.

    4  - Random Read Test
         Measure performance of reading a file by accessing
         random locations within the file.

    5  - Random Write Test
         Measure performance of writing a file by accessing
         random locations within the file.

    6  - Backwards Read Test
         Measure performance of reading a file backwards.

    7  - Record Rewrite Test
         Measure performance of writing and rewriting a
         particular spot within the file.

    8  - Strided Read Test
         Measure performance of reading a file with strided
         access behavior.

    9  - Fwrite Test
         Measure performance of writing a file using the
         library function fwrite() that performs
         buffered write operations.

    10 - Frewrite Test
         Measure performance of writing a file using the
         library function fwrite() that performs
         buffered and blocked write operations.

    11 - Fread Test
         Measure performance of reading a file using the
         library function fread() that performs buffered
         and blocked read operations.

    12 - Freread Test
         Same as the Fread Test except the current file
         being read was read previously sometime in the
         past.

    By default, iozone will run all tests in auto mode. To run
    specific tests, specify them as a list, e.g.:

    [0,1,4,5]

    Please enable classifiers in your agenda or config file
    in order to display the results properly in the results.csv
    file.

    The official website for iozone is at www.iozone.org.
    """

    parameters = [
        Parameter('tests',
                  kind=list_of_ints,
                  allowed_values=range(13),
                  description='List of performance tests to run.'),
        Parameter('auto_mode',
                  kind=bool,
                  default=True,
                  description='Run tests in auto mode.'),
        Parameter('timeout',
                  kind=int,
                  default=14400,
                  description='Timeout for the workload.'),
        Parameter('file_size', kind=int, description='Fixed file size.'),
        Parameter('record_length',
                  kind=int,
                  description='Fixed record length.'),
        Parameter('threads', kind=int, description='Number of threads'),
        Parameter('other_params',
                  kind=str,
                  default='',
                  description='Other parameters. Run iozone -h to see'
                  ' the list of options.')
    ]

    def initialize(self, context):
        Iozone.host_binary = context.resolver.get(
            Executable(self, self.device.abi, 'iozone'))
        Iozone.device_binary = self.device.install(Iozone.host_binary)

    def setup(self, context):
        self.results = os.path.join(self.device.working_directory,
                                    iozone_results_txt)
        self.command = self._build_command()

        if self.threads and self.auto_mode:
            raise ConfigError("You cannot set the number of threads and enable"
                              " auto mode at the same time.")

    def _build_command(self):
        # pylint: disable=access-member-before-definition
        iozone_command = 'cd {} && {}'.format(self.device.working_directory,
                                              self.device_binary)

        if self.auto_mode:
            iozone_command += ' -a'

        if self.tests:
            if 0 not in self.tests:
                self.tests = [0] + self.tests
            iozone_command += ''.join([' -i {}'.format(t) for t in self.tests])

        if self.record_length > 0:
            iozone_command += ' -r {}'.format(self.record_length)

        if self.threads > 0:
            iozone_command += ' -t {}'.format(self.threads)

        if self.file_size > 0:
            iozone_command += ' -s {}'.format(self.file_size)

        if self.other_params:
            iozone_command += ' ' + self.other_params

        # enable reporting mode for parsing non-thread results
        iozone_command += ' -R > {}'.format(self.results)

        # check if -b option is used
        match = re.search(r'-b (.?\w+.?\w+?\s)', iozone_command)
        if match:
            self.user_file = match.group(1)
            self.device_output_file = os.path.join(
                self.device.working_directory, self.user_file)

        return iozone_command

    def run(self, context):
        self.device.execute(self.command, timeout=self.timeout)

    def update_result(self, context):
        self.device.pull_file(self.results, context.output_directory)
        self.outfile = os.path.join(context.output_directory,
                                    iozone_results_txt)

        if '-b' in self.other_params:
            self.device.pull_file(self.device_output_file,
                                  context.output_directory)

        # if running in thread mode
        if self.threads:
            thread_results = self.parse_thread_results()

            for name, value, units in thread_results:
                context.add_metric(name, value, units)

        # for non-thread mode results
        else:
            with open(self.outfile, 'r') as iozone_file:
                iozone_file = (line.replace('\"', '') for line in iozone_file)
                table_list = []

                # begin parsing results
                for line in iozone_file:
                    if 'Writer report' in line:
                        table_list.append(line.split())
                        break

                for line in iozone_file:
                    if 'exiting' in line or 'completed' in line:
                        break
                    else:
                        table_list.append(line.split())

                # create csv file
                self.write_to_csv(context, table_list)

                # parse metrics
                self.parse_metrics(context, table_list)

    def write_to_csv(self, context, csv_table_list):
        self.test_file = os.path.join(context.output_directory,
                                      'table_results.csv')

        # shift the second row of each block by adding an extra element
        # for "prettier" formatting
        index = 0
        for element in csv_table_list:
            if element:
                if index == 1:
                    element.insert(0, '0')
                index += 1
            else:
                index = 0

        # write the table out, closing the file automatically
        with open(self.test_file, 'w') as csv_file:
            wr = csv.writer(csv_file, delimiter=',')
            for item in csv_table_list:
                wr.writerow(item)

    # break list of results into smaller groups based on
    # test name
    def parse_metrics(self, context, plist):  # pylint: disable=no-self-use
        subvalue_list = []
        value_list = []
        for values in plist:
            if values:
                subvalue_list.append(values)
            else:
                value_list.append(subvalue_list)
                subvalue_list = []

        # If users run a list of specific tests, make
        # sure that the results for the last test
        # executed are appended.
        if subvalue_list:
            value_list.append(subvalue_list)

        for reports in value_list:
            # grab report name and convert it to a string
            report_name = reports[0]
            report_name = report_name[:-1]
            report_name = '_'.join(report_name).lower()

            record_sizes = reports[1]
            values = reports[2:]

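            # izip_longest pairs each record size from the header row with the
            # corresponding throughput value, padding with None when a row is
            # shorter than the header; the '0' column carries the file size.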
            for v in values:
                templist = OrderedDict(izip_longest(record_sizes, v))

                for reclen, value in templist.items():
                    if reclen == '0':
                        fs = value

                    if value is None:
                        value = '0'

                    classifiers = {'reclen': reclen, 'file_size': fs}
                    if reclen != '0':
                        context.add_metric(report_name,
                                           int(value),
                                           'kb/s',
                                           classifiers=classifiers)

    # parse thread-mode results
    def parse_thread_results(self):
        results = []
        with open(self.outfile, 'r') as iozone_file:
            for line in iozone_file:
                # grab section of data we care about
                if 'Throughput report' in line:
                    break
                else:
                    if '=' in line:
                        if 'Time Resolution' not in line:
                            line = line.replace('=', '')
                            line = line.split()

                            # grab headers
                            if len(line) >= 8:
                                header = line[0]
                                subheader = ' '.join(line[-5:-2])
                                header += ' ' + subheader
                            else:
                                header = ' '.join(line[0:2])

                            units = line[-1]
                            value = line[-2]
                            tup = (header, value, units)
                            results.append(tup)

        return results

    def finalize(self, context):
        self.device.uninstall_executable(self.device_binary)
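
For concreteness, a sketch of the command ``_build_command`` assembles for ``auto_mode=False``, ``tests=[1, 4]`` (test 0 is prepended automatically), ``record_length=4`` and ``file_size=1024``, assuming the binary was installed to ``/data/local/tmp/iozone``:

    cd /data/local/tmp && /data/local/tmp/iozone -i 0 -i 1 -i 4 -r 4 -s 1024 -R > /data/local/tmp/iozone_results.txt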
Example No. 11
class AppShare(AndroidUxPerfWorkload):

    name = 'appshare'
    package = []
    activity = None
    view = []
    description = '''
    Workload to test how responsive a device is when context switching between
    application tasks. It combines workflows from googlephotos, gmail and
    skype.

    ** Setup **
    Credentials for the user account used to log into the Skype app have to be provided
    in the agenda, as well as the display name of the contact to call.

    For reliable testing, this workload requires a good and stable internet connection,
    preferably on Wi-Fi.

    Although this workload attempts to be network independent, it requires a
    network connection (ideally, wifi) to run. This is because the welcome
    screen UI is dependent on an existing connection.

    Test description:
    1. GooglePhotos is started in offline access mode
        1.1. The welcome screen is dismissed
        1.2. Any promotion popup is dismissed
        1.3. The provided ``test_image`` is selected and displayed
    2. The image is then shared across apps to Gmail
        2.1. The first run dialogue is dismissed
        2.2. Enter recipient details in the To field
        2.3. Enter text in the Subject field
        2.4. Enter text in the Body field
        2.5. Click the Send mail button
    3. Return to Googlephotos and log in to Skype via the share action
    4. Return to Googlephotos and share the ``test_image`` with Skype
        4.1. Search for the ``skype_contact_name`` from the Contacts list
        4.2. Dismiss any update popup that appears
        4.3. The image is posted in the Chat
    '''

    parameters = [
        Parameter('test_image', kind=str, default='uxperf_1600x1200.jpg',
                  description='''
                  An image to be copied onto the device that will be shared
                  across multiple apps
                  '''),
        Parameter('email_recipient', kind=str, default='*****@*****.**',
                  description='''
                  The email address of the recipient to receive the shared image
                  '''),
        Parameter('skype_login_name', kind=str, mandatory=True,
                  description='''
                  Account to use when logging into skype from which to share the image
                  '''),
        Parameter('skype_login_pass', kind=str, mandatory=True,
                  description='''
                  Password associated with the skype account
                  '''),
        Parameter('skype_contact_name', kind=str, default='Echo / Sound Test Service',
                  description='''
                  This is the contact display name as it appears in the people list
                  '''),
    ]

    # This workload relies on the internet so check that there is a working
    # internet connection
    requires_network = True

    def __init__(self, device, **kwargs):
        super(AppShare, self).__init__(device, **kwargs)
        self.deployable_assets = [self.test_image]
        self.clean_assets = True
        loader = ExtensionLoader()
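
        # Each sub-workload below gets a filtered copy of this workload's
        # kwargs: parameters it does not understand are deleted, and
        # appshare-level names are renamed to the names it expects.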

        # Initialise googlephotos
        args_googlephotos = dict(kwargs)
        del args_googlephotos['test_image']
        del args_googlephotos['email_recipient']
        del args_googlephotos['skype_login_name']
        del args_googlephotos['skype_login_pass']
        del args_googlephotos['skype_contact_name']
        args_googlephotos['markers_enabled'] = False
        self.wl_googlephotos = loader.get_workload('googlephotos', device, **args_googlephotos)
        self.view += self.wl_googlephotos.view
        self.package.append(self.wl_googlephotos.package)

        # Initialise gmail
        args_gmail = dict(kwargs)
        del args_gmail['test_image']
        args_gmail['recipient'] = args_gmail.pop('email_recipient')
        del args_gmail['skype_login_name']
        del args_gmail['skype_login_pass']
        del args_gmail['skype_contact_name']
        args_gmail['markers_enabled'] = False
        self.wl_gmail = loader.get_workload('gmail', device, **args_gmail)
        self.view += self.wl_gmail.view
        self.package.append(self.wl_gmail.package)

        # Initialise skype
        args_skype = dict(kwargs)
        del args_skype['test_image']
        del args_skype['email_recipient']
        args_skype['login_name'] = args_skype.pop('skype_login_name')
        args_skype['login_pass'] = args_skype.pop('skype_login_pass')
        args_skype['contact_name'] = args_skype.pop('skype_contact_name')
        args_skype['markers_enabled'] = False
        self.wl_skype = loader.get_workload('skype', device, **args_skype)
        self.view += self.wl_skype.view
        self.package.append(self.wl_skype.package)

    def validate(self):
        super(AppShare, self).validate()
        # Set package to None as WA doesn't allow it to be a list,
        # and we are not using it on the java side, only in wa itself.
        self.uiauto_params['package'] = None
        self.uiauto_params['googlephotos_package'] = self.wl_googlephotos.package
        self.uiauto_params['gmail_package'] = self.wl_gmail.package
        self.uiauto_params['skype_package'] = self.wl_skype.package
        self.uiauto_params['recipient'] = self.email_recipient
        self.uiauto_params['my_id'] = self.skype_login_name
        self.uiauto_params['my_pwd'] = self.skype_login_pass
        self.uiauto_params['name'] = self.skype_contact_name
        # Only accept certain image formats
        if os.path.splitext(self.test_image.lower())[1] not in ['.jpg', '.jpeg', '.png']:
            raise ValidationError('{} must be a JPEG or PNG file'.format(self.test_image))

    def setup(self, context):
        self.logger.info('Checking dependency Skype')
        self.wl_skype.launch_main = False
        self.wl_skype.deployable_assets = []
        self.wl_skype.init_resources(context)
        # Bypass running skype through intent
        AndroidUxPerfWorkload.setup(self.wl_skype, context)

        self.logger.info('Checking dependency Gmail')
        self.wl_gmail.launch_main = False
        self.wl_gmail.deployable_assets = []
        self.wl_gmail.init_resources(context)
        self.wl_gmail.setup(context)

        self.logger.info('Checking dependency Googlephotos')
        self.wl_googlephotos.launch_main = True
        self.wl_googlephotos.deployable_assets = []
        self.wl_googlephotos.init_resources(context)
        # Bypass googlephoto's asset setup
        AndroidUxPerfWorkload.setup(self.wl_googlephotos, context)

        self.logger.info('Checking dependency AppShare')
        super(AppShare, self).init_resources(context)
        # Only setup uiautomator side, then push assets
        # This prevents the requirement that AppShare must have an APK
        UiAutomatorWorkload.setup(self, context)
        self.push_assets(context)

    def teardown(self, context):
        self.wl_skype.teardown(context)
        self.wl_gmail.teardown(context)
        # Bypass googlephoto's asset teardown
        AndroidUxPerfWorkload.teardown(self.wl_googlephotos, context)

        super(AppShare, self).teardown(context)
Example No. 12
class GlbCorp(ApkWorkload):

    name = 'glb_corporate'
    description = """
    GFXBench GL (a.k.a. GLBench) v3.0 Corporate version.

    This is a version of GLBench available through a corporate license (distinct
    from the version available in Google Play store).

    """
    package = 'net.kishonti.gfxbench'
    activity = 'net.kishonti.benchui.TestActivity'

    result_start_regex = re.compile(
        r'I/TfwActivity\s*\(\s*\d+\):\s+\S+\s+result: {')
    preamble_regex = re.compile(r'I/TfwActivity\s*\(\s*\d+\):\s+')

    valid_test_ids = [
        'gl_alu',
        'gl_alu_off',
        'gl_blending',
        'gl_blending_off',
        'gl_driver',
        'gl_driver_off',
        'gl_fill',
        'gl_fill_off',
        'gl_manhattan',
        'gl_manhattan_off',
        'gl_trex',
        'gl_trex_battery',
        'gl_trex_off',
        'gl_trex_qmatch',
        'gl_trex_qmatch_highp',
    ]

    supported_resolutions = {
        '720p': {
            '-ei -w': 1280,
            '-ei -h': 720,
        },
        '1080p': {
            '-ei -w': 1920,
            '-ei -h': 1080,
        }
    }

    parameters = [
        Parameter(
            'times',
            kind=int,
            default=1,
            constraint=lambda x: x > 0,
            description=
            ('Specifies the number of times the benchmark will be run in a "tight '
             'loop", i.e. without performaing setup/teardown inbetween.')),
        Parameter(
            'resolution',
            default=None,
            allowed_values=['720p', '1080p', '720', '1080'],
            description=
            ('Explicitly specifies the resolution at which the benchmark will '
             'be run. If not specified, the device\'s native resolution will be used.'
             )),
        Parameter('test_id',
                  default='gl_manhattan_off',
                  allowed_values=valid_test_ids,
                  description='ID of the GFXBench test to be run.'),
        Parameter('run_timeout',
                  kind=int,
                  default=10 * 60,
                  description="""
                  Time out for workload execution. The workload will be killed if it hasn't completed
                  within this period.
                  """),
    ]

    aliases = [
        Alias('manhattan', test_id='gl_manhattan'),
        Alias('manhattan_off', test_id='gl_manhattan_off'),
        Alias('manhattan_offscreen', test_id='gl_manhattan_off'),
    ]

    def setup(self, context):
        super(GlbCorp, self).setup(context)
        self.command = self._build_command()
        self.monitor = GlbRunMonitor(self.device)
        self.monitor.start()

    def start_activity(self):
        # Unlike with most other APK workloads, we're invoking the use case
        # directly by starting the activity with appropriate parameters on the
        # command line during execution, so we don't need to start the
        # activity during setup.
        pass

    def run(self, context):
        for _ in xrange(self.times):
            result = self.device.execute(self.command,
                                         timeout=self.run_timeout)
            if 'FAILURE' in result:
                raise WorkloadError(result)
            else:
                self.logger.debug(result)
            time.sleep(DELAY)
            self.monitor.wait_for_run_end(self.run_timeout)

    def update_result(self, context):  # NOQA
        super(GlbCorp, self).update_result(context)
        self.monitor.stop()
        iteration = 0
        results = []
        with open(self.logcat_log) as fh:
            try:
                line = fh.next()
                result_lines = []
                while True:
                    if self.result_start_regex.search(line):
                        result_lines.append('{')
                        line = fh.next()
                        while self.preamble_regex.search(line):
                            result_lines.append(
                                self.preamble_regex.sub('', line))
                            line = fh.next()
                        try:
                            result = json.loads(''.join(result_lines))
                            results.append(result)
                            if iteration:
                                suffix = '_{}'.format(iteration)
                            else:
                                suffix = ''
                            for sub_result in result['results']:
                                frames = sub_result['score']
                                elapsed_time = sub_result['elapsed_time'] / 1000
                                fps = frames / elapsed_time
                                context.result.add_metric(
                                    'score' + suffix, frames, 'frames')
                                context.result.add_metric('fps' + suffix, fps)
                        except ValueError:
                            self.logger.warning(
                                'Could not parse result for iteration {}'.
                                format(iteration))
                        result_lines = []
                        iteration += 1
                    line = fh.next()
            except StopIteration:
                pass  # EOF
        if results:
            outfile = os.path.join(context.output_directory,
                                   'glb-results.json')
            with open(outfile, 'wb') as wfh:
                json.dump(results, wfh, indent=4)

    def _build_command(self):
        command_params = []
        command_params.append('-e test_ids "{}"'.format(self.test_id))
        if self.resolution:
            if not self.resolution.endswith('p'):
                self.resolution += 'p'
            for k, v in self.supported_resolutions[
                    self.resolution].iteritems():
                command_params.append('{} {}'.format(k, v))
        return 'am start -W -S -n {}/{} {}'.format(self.package, self.activity,
                                                   ' '.join(command_params))
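
With the default ``test_id='gl_manhattan_off'`` and ``resolution='720p'``, ``_build_command`` would return something like the following (a sketch; the order of the ``-ei`` pairs may vary with dict iteration order):

    am start -W -S -n net.kishonti.gfxbench/net.kishonti.benchui.TestActivity -e test_ids "gl_manhattan_off" -ei -w 1280 -ei -h 720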
Example No. 13
class CoreUtilization(Instrument):

    name = 'coreutil'
    description = """
    Measures CPU core activity during workload execution in terms of the percentage of time a number
    of cores were utilized above the specified threshold.

    This instrument generates a ``coreutil.csv`` report in the workload's output directory. The report is
    formatted as follows::

        <threshold,1core,2core,3core,4core
        18.098132,38.650248000000005,10.736180000000001,3.6809760000000002,28.834312000000001

    Interpretation of the result:

     - 38.65% of the total time only a single core was running at or above the threshold value
     - 10.736% of the total time two cores were running simultaneously at or above the threshold value
     - 3.6809% of the total time three cores were running simultaneously at or above the threshold value
     - 28.8314% of the total time four cores were running simultaneously at or above the threshold value
     - 18.098% of the time all cores were running below the threshold value.

    .. note:: This instrument does not work on the ARM big.LITTLE IKS implementation.

    """

    parameters = [
        Parameter(
            'threshold',
            kind=int,
            default=50,
            constraint=lambda x: 0 < x <= 100,
            description=
            'Cores with percentage utilization above this value will be considered '
            'as "utilized". This value may need to be adjusted based on the background '
            'activity and the intensity of the workload being instrumented (e.g. it may '
            'need to be lowered for low-intensity workloads such as video playback).'
        )
    ]

    def __init__(self, device, **kwargs):
        super(CoreUtilization, self).__init__(device, **kwargs)
        self.collector = None
        self.output_dir = None
        self.cores = None
        self.output_artifact_registered = False

    def setup(self, context):
        ''' Calls ProcCollect class '''
        self.output_dir = context.output_directory
        self.collector = ProcCollect(self.device, self.logger, self.output_dir)
        self.cores = self.device.number_of_cores

    def start(self, context):  # pylint: disable=W0613
        ''' Starts collecting data once the workload starts '''
        self.logger.debug('Starting to collect /proc/stat data')
        self.collector.start()

    def stop(self, context):  # pylint: disable=W0613
        ''' Stops collecting data once the workload stops '''
        self.logger.debug('Stopping /proc/stat data collection')
        self.collector.stop()

    def update_result(self, context):
        ''' updates result into coreutil.csv '''
        self.collector.join()  # wait for "proc.txt" to generate.
        context.add_artifact('proctxt', 'proc.txt', 'raw')
        calc = Calculator(self.cores, self.threshold, context)  # pylint: disable=E1101
        calc.calculate()
        if not self.output_artifact_registered:
            context.add_run_artifact('cpuutil', 'coreutil.csv', 'data')
            self.output_artifact_registered = True
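
Since the five columns partition total time, each row of ``coreutil.csv`` should sum to roughly 100. A quick sanity check over the example row from the description (a standalone sketch, not part of the instrument):

    row = [18.098132, 38.650248000000005, 10.736180000000001,
           3.6809760000000002, 28.834312000000001]
    assert abs(sum(row) - 100.0) < 0.01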
Example No. 14
class BBench(Workload):

    name = 'bbench'
    description = """
    BBench opens the device's built-in browser, navigates to and scrolls
    through a set of preloaded web pages, and signals completion by connecting
    to a local server that the workload starts beforehand. It can optionally
    play an audio file in the background during execution.

    """

    summary_metrics = ['Mean Latency']

    parameters = [
        Parameter(
            'with_audio',
            kind=boolean,
            default=False,
            description=
            ('Specifies whether an MP3 should be played in the background during '
             'workload execution.')),
        Parameter(
            'server_timeout',
            kind=int,
            default=300,
            description=
            'Specifies the timeout (in seconds) before the server is stopped.'
        ),
        Parameter(
            'force_dependency_push',
            kind=boolean,
            default=False,
            description=
            ('Specifies whether to push dependency files to the device even '
             'if they are already on it.')),
        Parameter(
            'audio_file',
            default=os.path.join(settings.dependencies_directory,
                                 'Canon_in_D_Piano.mp3'),
            description=
            ('The (on-host) path to the audio file to be played. This is only used if '
             '``with_audio`` is ``True``.')),
        Parameter(
            'perform_cleanup',
            kind=boolean,
            default=False,
            description=
            'If ``True``, workload files on the device will be deleted after execution.'
        ),
        Parameter(
            'clear_file_cache',
            kind=boolean,
            default=True,
            description=
            'Clear the file cache on the target device prior to running the workload.'
        ),
        Parameter('browser_package',
                  default='com.android.browser',
                  description=
                  'Specifies the package name of the device\'s browser app.'),
        Parameter(
            'browser_activity',
            default='.BrowserActivity',
            description=
            'Specifies the startup activity name of the device\'s browser app.'
        ),
    ]

    aliases = [
        Alias('bbench_with_audio', with_audio=True),
    ]

    supported_platforms = ['android']

    def setup(self, context):  # NOQA
        self.bbench_on_device = '/'.join(
            [self.device.working_directory, 'bbench'])
        self.bbench_server_on_device = os.path.join(
            self.device.working_directory, BBENCH_SERVER_NAME)
        self.audio_on_device = os.path.join(self.device.working_directory,
                                            DEFAULT_AUDIO_FILE_NAME)
        self.index_noinput = 'file:///{}'.format(
            self.bbench_on_device) + '/index_noinput.html'

        if not os.path.isdir(os.path.join(self.dependencies_directory,
                                          "sites")):
            self._download_bbench_file()
        if self.with_audio and not os.path.isfile(self.audio_file):
            self._download_audio_file()

        if not os.path.isdir(self.dependencies_directory):
            raise ConfigError('Bbench directory does not exist: {}'.format(
                self.dependencies_directory))
        self._apply_patches()

        if self.with_audio:
            if self.force_dependency_push or not self.device.file_exists(
                    self.audio_on_device):
                self.device.push_file(self.audio_file,
                                      self.audio_on_device,
                                      timeout=120)

        # Push the bbench site pages and http server to target device
        if self.force_dependency_push or not self.device.file_exists(
                self.bbench_on_device):
            self.logger.debug('Copying bbench sites to device.')
            self.device.push_file(self.dependencies_directory,
                                  self.bbench_on_device,
                                  timeout=300)

        # Push the bbench server
        host_binary = context.resolver.get(
            Executable(self, self.device.abi, 'bbench_server'))
        device_binary = self.device.install(host_binary)
        self.launch_server_command = '{} {}'.format(device_binary,
                                                    self.server_timeout)

        # Open the browser with default page
        self.device.execute('am start -n  {}/{} about:blank'.format(
            self.browser_package, self.browser_activity))
        time.sleep(5)

        # Stop the browser if already running and wait for it to stop
        self.device.execute('am force-stop {}'.format(self.browser_package))
        time.sleep(5)

        # Clear the logs
        self.device.clear_logcat()

        # clear browser cache
        self.device.execute('pm clear {}'.format(self.browser_package))
        if self.clear_file_cache:
            self.device.execute('sync')
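            # writing 3 to drop_caches frees the page cache plus dentries
            # and inodes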
            self.device.set_sysfile_value('/proc/sys/vm/drop_caches', 3)

        # On Android 6+, the web browser requires permissions to access the SD card
        if self.device.get_sdk_version() >= 23:
            self.device.execute(
                "pm grant com.android.browser android.permission.READ_EXTERNAL_STORAGE"
            )
            self.device.execute(
                "pm grant com.android.browser android.permission.WRITE_EXTERNAL_STORAGE"
            )

        # Launch the background music
        if self.with_audio:
            self.device.execute(
                'am start -W -S -n com.android.music/.MediaPlaybackActivity -d {}'
                .format(self.audio_on_device))

    def run(self, context):
        # Launch the bbench
        self.device.execute('am start -n  {}/{} {}'.format(
            self.browser_package, self.browser_activity, self.index_noinput))
        time.sleep(5)  # WA1 parity
        # Launch the server waiting for Bbench to complete
        self.device.execute(self.launch_server_command, self.server_timeout)

    def update_result(self, context):
        # Stop the browser
        self.device.execute('am force-stop {}'.format(self.browser_package))

        # Stop the music
        if self.with_audio:
            self.device.execute('am force-stop com.android.music')

        # Get index_no_input.html
        indexfile = os.path.join(self.device.working_directory,
                                 'bbench/index_noinput.html')
        self.device.pull_file(indexfile, context.output_directory)

        # Get the logs
        output_file = os.path.join(self.device.working_directory,
                                   'browser_bbench_logcat.txt')
        self.device.execute('logcat -v time -d > {}'.format(output_file))
        self.device.pull_file(output_file, context.output_directory)

        metrics = _parse_metrics(
            os.path.join(context.output_directory,
                         'browser_bbench_logcat.txt'),
            os.path.join(context.output_directory, 'index_noinput.html'),
            context.output_directory)
        for key, values in metrics:
            for i, value in enumerate(values):
                metric = '{}_{}'.format(key, i) if i else key
                context.result.add_metric(metric,
                                          value,
                                          units='ms',
                                          lower_is_better=True)

    def teardown(self, context):
        if self.perform_cleanup:
            self.device.execute('rm -r {}'.format(self.bbench_on_device))
            self.device.execute('rm {}'.format(self.audio_on_device))

    def _download_audio_file(self):
        self.logger.debug('Downloading audio file.')
        urllib.urlretrieve(DEFAULT_AUDIO_FILE, self.audio_file)

    def _download_bbench_file(self):
        # downloading the file to bbench_dir
        self.logger.debug('Downloading bbench dependencies.')
        full_file_path = os.path.join(self.dependencies_directory,
                                      DOWNLOADED_FILE_NAME)
        urllib.urlretrieve(DEFAULT_BBENCH_FILE, full_file_path)

        # Extracting Bbench to bbench_dir/
        self.logger.debug('Extracting bbench dependencies.')
        tar = tarfile.open(full_file_path)
        tar.extractall(os.path.dirname(self.dependencies_directory))

        # Remove unneeded files and the compressed archive
        os.remove(full_file_path)
        youtube_dir = os.path.join(self.dependencies_directory, 'sites',
                                   'youtube')
        os.remove(os.path.join(youtube_dir, 'www.youtube.com', 'kp.flv'))
        os.remove(os.path.join(youtube_dir, 'kp.flv'))

    def _apply_patches(self):
        self.logger.debug('Applying patches.')
        shutil.copy(os.path.join(PATCH_FILES, "bbench.js"),
                    self.dependencies_directory)
        shutil.copy(os.path.join(PATCH_FILES, "results.html"),
                    self.dependencies_directory)
        shutil.copy(os.path.join(PATCH_FILES, "index_noinput.html"),
                    self.dependencies_directory)
        shutil.copy(
            os.path.join(PATCH_FILES, "bbc.html"),
            os.path.join(self.dependencies_directory, "sites", "bbc",
                         "www.bbc.co.uk", "index.html"))
        shutil.copy(
            os.path.join(PATCH_FILES, "cnn.html"),
            os.path.join(self.dependencies_directory, "sites", "cnn",
                         "www.cnn.com", "index.html"))
        shutil.copy(
            os.path.join(PATCH_FILES, "twitter.html"),
            os.path.join(self.dependencies_directory, "sites", "twitter",
                         "twitter.com", "index.html"))
class Geekbench(AndroidUiAutoBenchmark):

    name = 'geekbench'
    description = """
    Geekbench provides a comprehensive set of benchmarks engineered to quickly
    and accurately measure processor and memory performance.

    http://www.primatelabs.com/geekbench/

    From the website:

    Designed to make benchmarks easy to run and easy to understand, Geekbench
    takes the guesswork out of producing robust and reliable benchmark results.

    Geekbench scores are calibrated against a baseline score of 1,000 (which is
    the score of a single-processor Power Mac G5 @ 1.6GHz). Higher scores are
    better, with double the score indicating double the performance.

    The benchmarks fall into one of four categories:

        - integer performance.
        - floating point performance.
        - memory performance.
        - stream performance.

    Geekbench benchmarks: http://www.primatelabs.com/geekbench/doc/benchmarks.html

    Geekbench scoring methodology:
    http://support.primatelabs.com/kb/geekbench/interpreting-geekbench-scores

    """
    summary_metrics = ['score', 'multicore_score']
    versions = {
        '3': {
            'package': 'com.primatelabs.geekbench3',
            'activity': '.HomeActivity',
        },
        '2': {
            'package': 'ca.primatelabs.geekbench2',
            'activity': '.HomeActivity',
        },
    }
    begin_regex = re.compile(
        r'^\s*D/WebViewClassic.loadDataWithBaseURL\(\s*\d+\s*\)'
        r'\s*:\s*(?P<content>\<.*)\s*$')
    replace_regex = re.compile(r'<[^>]*>')

    parameters = [
        Parameter(
            'version',
            default=sorted(versions.keys())[-1],
            allowed_values=sorted(versions.keys()),
            description='Specifies which version of the workload should be run.'
        ),
        Parameter(
            'times',
            kind=int,
            default=1,
            description=
            ('Specifies the number of times the benchmark will be run in a "tight '
             'loop", i.e. without performing setup/teardown in between.')),
    ]

    @property
    def activity(self):
        return self.versions[self.version]['activity']

    @property
    def package(self):
        return self.versions[self.version]['package']

    def __init__(self, device, **kwargs):
        super(Geekbench, self).__init__(device, **kwargs)
        self.uiauto_params['version'] = self.version
        self.uiauto_params['times'] = self.times
        self.run_timeout = 5 * 60 * self.times

    def initialize(self, context):
        if self.version == '3' and not self.device.is_rooted:
            raise WorkloadError(
                'Geekbench 3 requires a rooted device.')

    def init_resources(self, context):
        self.apk_file = context.resolver.get(
            wlauto.common.android.resources.ApkFile(self),
            version=self.version)
        self.uiauto_file = context.resolver.get(
            wlauto.common.android.resources.JarFile(self))
        self.device_uiauto_file = self.device.path.join(
            self.device.working_directory, os.path.basename(self.uiauto_file))
        if not self.uiauto_package:
            self.uiauto_package = os.path.splitext(
                os.path.basename(self.uiauto_file))[0]

    def update_result(self, context):
        super(Geekbench, self).update_result(context)
        update_method = getattr(self, 'update_result_{}'.format(self.version))
        update_method(context)

    def validate(self):
        if (self.times > 1) and (self.version == '2'):
            raise ConfigError(
                'times parameter is not supported for version 2 of Geekbench.')

    def update_result_2(self, context):
        score_calculator = GBScoreCalculator()
        score_calculator.parse(self.logcat_log)
        score_calculator.update_results(context)

    def update_result_3(self, context):
        outfile_glob = self.device.path.join(
            self.device.package_data_directory, self.package, 'files', '*gb3')
        on_device_output_files = [
            f.strip()
            for f in self.device.execute('ls {}'.format(outfile_glob),
                                         as_root=True).split('\n') if f
        ]
        for i, on_device_output_file in enumerate(on_device_output_files):
            host_temp_file = tempfile.mktemp()
            self.device.pull_file(on_device_output_file, host_temp_file)
            host_output_file = os.path.join(
                context.output_directory,
                os.path.basename(on_device_output_file))
            with open(host_temp_file) as fh:
                data = json.load(fh)
            os.remove(host_temp_file)
            with open(host_output_file, 'w') as wfh:
                json.dump(data, wfh, indent=4)
            context.iteration_artifacts.append(
                Artifact('geekout',
                         path=os.path.basename(on_device_output_file),
                         kind='data',
                         description='Geekbench 3 output from device.'))
            context.result.add_metric(namemify('score', i), data['score'])
            context.result.add_metric(namemify('multicore_score', i),
                                      data['multicore_score'])
            for section in data['sections']:
                context.result.add_metric(
                    namemify(section['name'] + '_score', i), section['score'])
                context.result.add_metric(
                    namemify(section['name'] + '_multicore_score', i),
                    section['multicore_score'])
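
The parsing in ``update_result_3`` implies that each pulled ``*.gb3`` file is JSON of roughly this shape (a sketch; field values below are illustrative, not real results):

    data = {
        'score': 1000,
        'multicore_score': 2500,
        'sections': [
            {'name': 'Integer', 'score': 1100, 'multicore_score': 2700},
            {'name': 'Floating Point', 'score': 950, 'multicore_score': 2400},
        ],
    }

``namemify`` (not shown here) presumably suffixes the metric name with the iteration index for iterations after the first.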
Example No. 16
class Peacekeeper(AndroidUiAutoBenchmark):

    name = 'peacekeeper'
    description = """
    Peacekeeper is a free and fast browser test that measures a browser's speed.

    .. note::

       This workload requires a network connection as well as support for
       one of the two currently-supported browsers. Moreover, TC2 has a
       compatibility issue with Chrome.

    """
    run_timeout = 15 * 60

    parameters = [
        Parameter('browser',
                  default='firefox',
                  allowed_values=['firefox', 'chrome'],
                  description='The browser to be benchmarked.'),
        Parameter('output_file',
                  default=None,
                  description="""
                  The file on the device into which the result URL of the
                  Peacekeeper benchmark will be written once the benchmark
                  completes. Defaults to peacekeeper.txt in the device's
                  ``working_directory``.
                  """),
        Parameter('peacekeeper_url',
                  default='http://peacekeeper.futuremark.com/run.action',
                  description='The URL to run the peacekeeper benchmark.'),
    ]

    def __init__(self, device, **kwargs):
        super(Peacekeeper, self).__init__(device, **kwargs)
        self.variant_name = self.browser

    def update_result(self, context):
        super(Peacekeeper, self).update_result(context)
        url = None

        # Pull the result page url, which contains the results, from the
        # peacekeeper.txt file and process it
        self.device.pull_file(self.output_file, context.output_directory)
        result_file = os.path.join(context.output_directory,
                                   os.path.basename(self.output_file))
        with open(result_file) as fh:
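            # the file may contain multiple lines; only the last one is kept
            # as the result URL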
            for line in fh:
                url = line

        # Fetch the html page containing the results
        if not url:
            raise WorkloadError(
                'The url is empty, error while running peacekeeper benchmark')

        # urlopen expects url beginning with protocol.
        if not url.startswith('http'):
            url = 'http://' + url

        req = urllib2.Request(url)
        response = urllib2.urlopen(req)
        result_page = response.read()

        # Parse the HTML content using HTML parser
        parser = PeacekeeperParser()
        parser.feed(result_page)

        # Add peacekeeper_score into results file
        context.result.add_metric('peacekeeper_score',
                                  parser.peacekeeper_score)

    def validate(self):
        if self.output_file is None:
            self.output_file = os.path.join(self.device.working_directory,
                                            'peacekeeper.txt')
        if self.browser == 'chrome' and self.device.name == 'TC2':
            raise WorkloadError('Chrome not supported on TC2')

        self.uiauto_params['output_file'] = self.output_file
        self.uiauto_params['browser'] = self.browser
        self.uiauto_params['peacekeeper_url'] = self.peacekeeper_url

        self.package = BROWSER_MAP[self.browser]['package']
        self.activity = BROWSER_MAP[self.browser]['activity']
Example No. 17
class Applaunch(AndroidUxPerfWorkload):

    name = 'applaunch'
    description = '''
    This workload launches and measures the launch time of applications for supporting workloads.

    Currently supported workloads are the ones that implement ``ApplaunchInterface``; any
    workload that wishes to support applaunch must implement that interface.
    The corresponding java file of the workload associated with the application being measured
    is executed during the run. The application that needs to be
    measured is passed via the parameter ``workload_name``. The parameters required for that workload
    have to be passed as a dictionary, which is captured by the parameter ``workload_params``.
    This information can be obtained by inspecting the workload details of the specific workload.

    The workload supports running multiple iterations of an application
    launch in two modes:

    1. Launch from background
    2. Launch from long-idle

    These modes are selected with the ``applaunch_type`` parameter.

    ``launch_from_background``
        Launches an application after the application is sent to background by
        pressing Home button.

    ``launch_from_long-idle``
        Launches an application after killing an application process and
        clearing all the caches.

    **Test Description:**

    -   During initialization and setup, the application being measured is launched
        for the first time. The jar file of the application's workload, which
        implements the methods needed to measure the application launch time,
        is pushed to the device's working directory (``workdir``).

    -   The run phase calls the applaunch UiAutomator, which runs in two subphases.
            A.  Applaunch Setup Run:
                    During this phase, welcome screens and dialogues during the first launch
                    of the instrumented application are cleared.
            B.  Applaunch Metric Run:
                    During this phase, the application is launched a number of times determined
                    by the parameter ``applaunch_iterations``.
                    Each of these iterations is instrumented to capture the launch time taken,
                    and the values are recorded as UXPERF marker values in the logfile.
    '''
    supported_platforms = ['android']

    parameters = [
        Parameter('workload_name',
                  kind=str,
                  description='Name of the uxperf workload to launch',
                  default='gmail'),
        Parameter('workload_params',
                  kind=dict,
                  default={},
                  description="""
                  parameters of the uxperf workload whose application launch
                  time is measured
                  """),
        Parameter(
            'applaunch_type',
            kind=str,
            default='launch_from_background',
            allowed_values=['launch_from_background', 'launch_from_long-idle'],
            description="""
                  Choose launch_from_long-idle for measuring launch time
                  from long-idle. These two types are described in the class
                  description.
                  """),
        Parameter('applaunch_iterations',
                  kind=int,
                  default=1,
                  description="""
                  Number of iterations of the application launch
                  """),
        Parameter('report_results',
                  kind=bool,
                  default=True,
                  description="""
                  Choose to report results of the application launch time.
                  """),
    ]

    def __init__(self, device, **kwargs):
        super(Applaunch, self).__init__(device, **kwargs)

    def init_resources(self, context):
        super(Applaunch, self).init_resources(context)
        loader = ExtensionLoader(packages=settings.extension_packages,
                                 paths=settings.extension_paths)
        self.workload_params['markers_enabled'] = True
        self.workload = loader.get_workload(self.workload_name, self.device,
                                            **self.workload_params)
        self.init_workload_resources(context)
        self.package = self.workload.package

    def init_workload_resources(self, context):
        self.workload.uiauto_file = context.resolver.get(
            wlauto.common.android.resources.JarFile(self.workload))
        if not self.workload.uiauto_file:
            raise ResourceError(
                'No UI automation JAR file found for workload {}.'.format(
                    self.workload.name))
        self.workload.device_uiauto_file = self.device.path.join(
            self.device.working_directory,
            os.path.basename(self.workload.uiauto_file))
        if not self.workload.uiauto_package:
            self.workload.uiauto_package = os.path.splitext(
                os.path.basename(self.workload.uiauto_file))[0]

    def validate(self):
        super(Applaunch, self).validate()
        self.workload.validate()
        self.pass_parameters()

    def pass_parameters(self):
        self.uiauto_params['workload'] = self.workload.name
        self.uiauto_params['package'] = self.workload.package
        self.uiauto_params[
            'binaries_directory'] = self.device.binaries_directory
        self.uiauto_params.update(self.workload.uiauto_params)
        if self.workload.activity:
            self.uiauto_params['launch_activity'] = self.workload.activity
        else:
            self.uiauto_params['launch_activity'] = "None"
        self.uiauto_params['applaunch_type'] = self.applaunch_type
        self.uiauto_params['applaunch_iterations'] = self.applaunch_iterations

    def setup(self, context):
        AndroidBenchmark.setup(self.workload, context)
        if not self.workload.launch_main:
            self.workload.launch_app()
        UiAutomatorWorkload.setup(self, context)
        self.workload.device.push_file(self.workload.uiauto_file,
                                       self.workload.device_uiauto_file)

    def run(self, context):
        UiAutomatorWorkload.run(self, context)

    def update_result(self, context):
        super(Applaunch, self).update_result(context)
        if self.report_results:
            parser = UxPerfParser(context, prefix='applaunch_')
            logfile = os.path.join(context.output_directory, 'logcat.log')
            parser.parse(logfile)
            parser.add_action_timings()

    def teardown(self, context):
        super(Applaunch, self).teardown(context)
        AndroidBenchmark.teardown(self.workload, context)
        UiAutomatorWorkload.teardown(self.workload, context)
        # The workload uses DexClassLoader to load the JAR file of the
        # instrumented workload. DexClassLoader unzips and generates a .dex
        # file in the .jar directory during the run.
        device_uiauto_dex_file = self.workload.device_uiauto_file.replace(
            ".jar", ".dex")
        self.workload.device.delete_file(
            self.device.path.join(self.device.binaries_directory,
                                  device_uiauto_dex_file))
Ejemplo n.º 18
0
class StressNg(Workload):

    name = 'stress_ng'
    description = """
    stress-ng will stress test a computer system in various selectable ways. It
    was designed to exercise various physical subsystems of a computer as well
    as the various operating system kernel interfaces.

    stress-ng can also measure test throughput rates; this can be useful to
    observe performance changes across different operating system releases or
    types of hardware. However, it has never been intended to be used as a
    precise benchmark test suite, so do NOT use it in this manner.

    The official website for stress-ng is at:
        http://kernel.ubuntu.com/~cking/stress-ng/

    Source code is available from:
        http://kernel.ubuntu.com/git/cking/stress-ng.git/
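
    Example agenda entry (a sketch; the values shown are the parameter
    defaults)::

        workloads:
            - name: stress_ng
              workload_parameters:
                  stressor: cpu
                  threads: 0
                  duration: 60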
    """

    parameters = [
        Parameter('stressor',
                  kind=str,
                  default='cpu',
                  allowed_values=[
                      'cpu', 'io', 'fork', 'switch', 'vm', 'pipe', 'yield',
                      'hdd', 'cache', 'sock', 'fallocate', 'flock', 'affinity',
                      'timer', 'dentry', 'urandom', 'sem', 'open', 'sigq',
                      'poll'
                  ],
                  description='Stress test case name. The cases listed in '
                  'allowed values come from the stable release '
                  'version 0.01.32. The binary included here is '
                  'compiled from dev version 0.06.01. Refer to '
                  'man page for the definition of each stressor.'),
        Parameter('threads',
                  kind=int,
                  default=0,
                  description='The number of workers to run. Specifying a '
                  'negative or zero value will select the number '
                  'of online processors.'),
        Parameter('duration',
                  kind=int,
                  default=60,
                  description='Timeout for test execution in seconds')
    ]

    def initialize(self, context):
        if not self.device.is_rooted:
            raise WorkloadError('stress-ng requires root permissions to run')

    def validate(self):
        if self.stressor == 'vm' and self.duration < 60:
            raise ConfigError('vm test duration needs to be >= 60s.')

    def setup(self, context):
        host_binary = context.resolver.get(
            Executable(self, self.device.abi, 'stress-ng'))
        self.binary = self.device.install_if_needed(host_binary)
        self.log = self.device.path.join(self.device.working_directory,
                                         'stress_ng_output.txt')
        self.results = self.device.path.join(self.device.working_directory,
                                             'stress_ng_results.yaml')
        self.command = ('{} --{} {} --timeout {}s --log-file {} --yaml {} '
                        '--metrics-brief --verbose'.format(
                            self.binary, self.stressor, self.threads,
                            self.duration, self.log, self.results))
        self.timeout = self.duration + 10

    def run(self, context):
        self.output = self.device.execute(self.command,
                                          timeout=self.timeout,
                                          as_root=True)

    def update_result(self, context):
        host_file_log = os.path.join(context.output_directory,
                                     'stress_ng_output.txt')
        host_file_results = os.path.join(context.output_directory,
                                         'stress_ng_results.yaml')
        self.device.pull_file(self.log, host_file_log)
        self.device.pull_file(self.results, host_file_results)

        with open(host_file_results, 'r') as stress_ng_results:
            results = yaml.load(stress_ng_results)

        try:
            metric = results['metrics'][0]['stressor']
            throughput = results['metrics'][0]['bogo-ops']
            context.result.add_metric(metric, throughput, 'ops')
        # For some stressors like vm, if the test duration is too short,
        # stress_ng may not be able to produce a test throughput rate.
        except TypeError:
            self.logger.warning(
                '{} test throughput rate not found. '
                'Please increase test duration and retry.'.format(
                    self.stressor))
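
# For illustration, with the default parameters the command assembled in
# setup() above expands to roughly the following (device paths are
# illustrative):
#
#   /data/local/tmp/stress-ng --cpu 0 --timeout 60s \
#       --log-file /data/local/tmp/stress_ng_output.txt \
#       --yaml /data/local/tmp/stress_ng_results.yaml \
#       --metrics-brief --verbose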
Ejemplo n.º 19
0
class FreqSweep(Instrument):
    name = 'freq_sweep'
    description = """
    Sweeps workloads through all available frequencies on a device.

    When enabled this instrument will take all workloads specified in an agenda
    and run them at all available frequencies for all clusters.

    Recommendations:
        - Setting the runner to 'by_spec' increases the chance of successfully
          completing an agenda without encountering hotplug issues
        - If possible disable dynamic hotplug on the target device
        - This instrument does not automatically pin workloads to the cores
          being swept since it is not aware of what the workloads do.
          To achieve this use the workload's taskset parameter (if it has one).
    """

    parameters = [
        Parameter('sweeps',
                  kind=list,
                  description="""
                  By default this instrument will sweep across all available
                  frequencies for all available clusters. If you wish to only
                  sweep across certain frequencies on particular clusters you
                  can do so by specifying this parameter.

                  Sweeps should be a list of dictionaries that can contain:
                   - Cluster (mandatory): The name of the cluster this sweep
                     will be performed on. E.g. `A7`
                   - Frequencies: A list of frequencies (in kHz) to use. If
                     this is not provided, all frequencies available for this
                     cluster will be used. E.g. `[800000, 900000, 1000000]`
                   - label: Workload specs will be named
                     `{spec id}_{label}_{frequency}`. If a label is not
                     provided, it will be named `sweep{sweep No.}`

                  Example sweep specification: ::

                      freq_sweep:
                          sweeps:
                              - cluster: A53
                                label: littles
                                frequencies: [800000, 900000, 1000000]
                              - cluster: A57
                                label: bigs
                  """),
    ]

    def validate(self):
        if not self.device.core_names:
            raise ConfigError(
                'The Device does not appear to have core_names configured.')

    def initialize(self, context):  # pylint: disable=r0912
        if not self.device.is_rooted:
            raise InstrumentError(
                'The device must be rooted to sweep frequencies')

        if 'userspace' not in self.device.list_available_cluster_governors(0):
            raise InstrumentError(
                "'userspace' cpufreq governor must be enabled")

        # Create sweeps for each core type using num_cpus cores
        if not self.sweeps:
            self.sweeps = []
            for core in set(self.device.core_names):
                sweep_spec = {}
                sweep_spec['cluster'] = core
                sweep_spec['label'] = core
                self.sweeps.append(sweep_spec)

        new_specs = []
        old_specs = []
        for job in context.runner.job_queue:
            if job.spec not in old_specs:
                old_specs.append(job.spec)

        # Validate sweeps, add missing sections and create workload specs
        for i, sweep_spec in enumerate(self.sweeps):
            if 'cluster' not in sweep_spec:
                raise ConfigError('cluster must be defined for all sweeps')
            # Check if cluster exists on device
            if caseless_string(
                    sweep_spec['cluster']) not in self.device.core_names:
                raise ConfigError(
                    'Only {} cores are present on this device, you specified {}'
                    .format(", ".join(set(self.device.core_names)),
                            sweep_spec['cluster']))

            # Default to all available frequencies
            if 'frequencies' not in sweep_spec:
                self.device.enable_cpu(
                    self.device.core_names.index(sweep_spec['cluster']))
                sweep_spec[
                    'frequencies'] = self.device.list_available_core_frequencies(
                        sweep_spec['cluster'])

            # Check that the given frequencies are valid for the core cluster
            else:
                self.device.enable_cpu(
                    self.device.core_names.index(sweep_spec['cluster']))
                available_freqs = self.device.list_available_core_frequencies(
                    sweep_spec['cluster'])
                for freq in sweep_spec['frequencies']:
                    if freq not in available_freqs:
                        raise ConfigError(
                            'Frequency {} is not supported by {} cores'.format(
                                freq, sweep_spec['cluster']))

            # Add default labels
            if 'label' not in sweep_spec:
                sweep_spec['label'] = "sweep{}".format(i + 1)

            new_specs.extend(
                self.get_sweep_workload_specs(old_specs, sweep_spec, context))

        # Update config to reflect jobs that will actually run.
        context.config.workload_specs = new_specs
        config_file = os.path.join(context.host_working_directory,
                                   'run_config.json')
        with open(config_file, 'wb') as wfh:
            context.config.serialize(wfh)
        context.runner.init_queue(new_specs)

    def get_sweep_workload_specs(self, old_specs, sweep_spec, context):
        new_specs = []
        for old_spec in old_specs:
            for freq in sweep_spec['frequencies']:
                spec = old_spec.copy()
                if 'runtime_params' in sweep_spec:
                    spec.runtime_parameters = merge_dicts(
                        spec.runtime_parameters,
                        sweep_spec['runtime_params'],
                        dict_type=OrderedDict)
                if 'workload_params' in sweep_spec:
                    spec.workload_parameters = merge_dicts(
                        spec.workload_parameters,
                        sweep_spec['workload_params'],
                        dict_type=OrderedDict)
                spec.runtime_parameters['{}_governor'.format(
                    sweep_spec['cluster'])] = "userspace"
                spec.runtime_parameters['{}_frequency'.format(
                    sweep_spec['cluster'])] = freq
                spec.id = '{}_{}_{}'.format(spec.id, sweep_spec['label'], freq)
                spec.classifiers['core'] = sweep_spec['cluster']
                spec.classifiers['freq'] = freq
                spec.load(self.device, context.config.ext_loader)
                spec.workload.init_resources(context)
                spec.workload.validate()
                new_specs.append(spec)
        return new_specs
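
# A worked example of the spec naming scheme used above (a sketch): a spec
# with id '01_dhrystone', swept on a cluster labelled 'littles' at 800000 kHz,
# is renamed following the '{spec id}_{label}_{frequency}' pattern.
spec_id = '01_dhrystone'
new_id = '{}_{}_{}'.format(spec_id, 'littles', 800000)
assert new_id == '01_dhrystone_littles_800000'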
Ejemplo n.º 20
0
class Gem5LinuxDevice(BaseGem5Device, LinuxDevice):
    """
    Implements gem5 Linux device.

    This class allows a user to connect WA to a simulation using gem5. The
    connection to the device is made using the telnet connection of the
    simulator, and is used for all commands. The simulator does not have ADB
    support, and therefore we need to fall back to using standard shell
    commands.

    Files are copied into the simulation using a VirtIO 9P device in gem5. Files
    are copied out of the simulated environment using the m5 writefile command
    within the simulated system.

    When starting the workload run, the simulator is automatically started by
    Workload Automation, and a connection to the simulator is established. WA
    will then wait for Linux to boot on the simulated system (which can take
    hours), prior to executing any other commands on the device. It is also
    possible to resume from a checkpoint when starting the simulation. To do
    this, please append the relevant checkpoint commands from the gem5
    simulation script to the gem5_description argument in the agenda.

    Host system requirements:
        * VirtIO support. We rely on diod on the host system. This can be
          installed on Ubuntu using the following command:

                sudo apt-get install diod

    Guest requirements:
        * VirtIO support. We rely on VirtIO to move files into the simulation.
          Please make sure that the following are set in the kernel
          configuration:

                CONFIG_NET_9P=y

                CONFIG_NET_9P_VIRTIO=y

                CONFIG_9P_FS=y

                CONFIG_9P_FS_POSIX_ACL=y

                CONFIG_9P_FS_SECURITY=y

                CONFIG_VIRTIO_BLK=y

        * m5 binary. Please make sure that the m5 binary is on the device and
          can be found in the PATH.
    """

    name = 'gem5_linux'
    platform = 'linux'

    parameters = [
        Parameter('core_names', default=[], override=True),
        Parameter('core_clusters', default=[], override=True),
        Parameter('host',
                  default='localhost',
                  override=True,
                  description='Host name or IP address for the device.'),
        Parameter('login_prompt',
                  kind=types.list_of_strs,
                  default=['login:', 'AEL login:', 'username:'],
                  override=True),
        Parameter('login_password_prompt',
                  kind=types.list_of_strs,
                  default=['password:'],
                  override=True),
    ]

    def __init__(self, **kwargs):
        self.logger = logging.getLogger('Gem5LinuxDevice')
        LinuxDevice.__init__(self, **kwargs)
        BaseGem5Device.__init__(self)

    def login_to_device(self):
        # Wait for the login prompt
        prompt = self.login_prompt + [self.sckt.UNIQUE_PROMPT]
        i = self.sckt.expect(prompt, timeout=10)
        # Check if we are already at a prompt, or if we need to log in.
        if i < len(prompt) - 1:
            self.sckt.sendline("{}".format(self.username))
            password_prompt = self.login_password_prompt + [
                r'# ', self.sckt.UNIQUE_PROMPT
            ]
            j = self.sckt.expect(password_prompt, timeout=self.delay)
            if j < len(password_prompt) - 2:
                self.sckt.sendline("{}".format(self.password))
                self.sckt.expect([r'# ', self.sckt.UNIQUE_PROMPT],
                                 timeout=self.delay)

    def capture_screen(self, filepath):
        if BaseGem5Device.capture_screen(self, filepath):
            return

        # If we didn't manage to do the above, call the parent class.
        self.logger.warning(
            "capture_screen: falling back to parent class implementation")
        LinuxDevice.capture_screen(self, filepath)

    def initialize(self, context):
        self.resize_shell()
        self.deploy_m5(context, force=False)
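
# For context, login_to_device() above follows the standard pexpect
# expect/sendline pattern. A minimal standalone sketch (the telnet port and
# credentials are illustrative; in WA the connection is managed by
# BaseGem5Device):
import pexpect

# Open a telnet session to the simulator's console.
sckt = pexpect.spawn('telnet localhost 3456')
i = sckt.expect(['login:', r'# '], timeout=10)
if i == 0:  # saw a login prompt, so authenticate
    sckt.sendline('root')
    j = sckt.expect(['password:', r'# '], timeout=10)
    if j == 0:
        sckt.sendline('')  # illustrative: many gem5 images use an empty root password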
Ejemplo n.º 21
0
class MongodbUploader(ResultProcessor):

    name = 'mongodb'
    description = """
    Uploads run results to a MongoDB instance.

    MongoDB is a popular document-based data store (NoSQL database).

    """

    parameters = [
        Parameter(
            'uri',
            kind=str,
            default=None,
            description=
            """Connection URI. If specified, this will be used for connecting
                                 to the backend, and host/port parameters will be ignored."""
        ),
        Parameter(
            'host',
            kind=str,
            default='localhost',
            mandatory=True,
            description=
            'IP address/name of the machine hosting the MongoDB server.'),
        Parameter(
            'port',
            kind=int,
            default=27017,
            mandatory=True,
            description='Port on which the MongoDB server is listening.'),
        Parameter(
            'db',
            kind=str,
            default='wa',
            mandatory=True,
            description='Database on the server used to store WA results.'),
        Parameter(
            'extra_params',
            kind=dict,
            default={},
            description=
            '''Additional connection parameters may be specified using this (see the
            pymongo documentation).'''),
        Parameter(
            'authentication',
            kind=dict,
            default={},
            description=
            '''If specified, this will be passed to db.authenticate() upon connection;
            please see the pymongo documentation's authentication examples for details.'''
        ),
    ]

    def initialize(self, context):
        if pymongo is None:
            raise ResultProcessorError(
                'mongodb result processor requires the pymongo package to be installed.'
            )
        try:
            self.client = pymongo.MongoClient(self.host, self.port,
                                              **self.extra_params)
        except pymongo.errors.PyMongoError, e:
            raise ResultProcessorError(
                'Error connecting to mongod: {}'.format(e))
        self.dbc = self.client[self.db]
        self.fs = GridFS(self.dbc)
        if self.authentication:
            if not self.dbc.authenticate(**self.authentication):
                raise ResultProcessorError(
                    'Authentication to database {} failed.'.format(self.db))

        self.run_result_dbid = ObjectId()
        run_doc = context.run_info.to_dict()

        wa_adapter = run_doc['device']
        devprops = dict((k.translate(KEY_TRANS_TABLE), v)
                        for k, v in run_doc['device_properties'].iteritems())
        run_doc['device'] = devprops
        run_doc['device']['wa_adapter'] = wa_adapter
        del run_doc['device_properties']

        run_doc['output_directory'] = os.path.abspath(context.output_directory)
        run_doc['artifacts'] = []
        run_doc['workloads'] = context.config.to_dict()['workload_specs']
        for workload in run_doc['workloads']:
            workload['name'] = workload['workload_name']
            del workload['workload_name']
            workload['results'] = []
        self.run_dbid = self.dbc.runs.insert(run_doc)

        prefix = context.run_info.project if context.run_info.project else '[NOPROJECT]'
        run_part = context.run_info.run_name or context.run_info.uuid.hex
        self.gridfs_dir = os.path.join(prefix, run_part)
        i = 0
        while self.gridfs_directory_exists(self.gridfs_dir):
            if self.gridfs_dir.endswith('-{}'.format(i)):
                self.gridfs_dir = self.gridfs_dir[:-2]
            i += 1
            self.gridfs_dir += '-{}'.format(i)

        # Keep track of all generated artifacts, so that we know what to
        # include in the tarball. The tarball will contain raw artifacts
        # (other kinds would have been uploaded directly or do not contain
        # new data) and all files in the results dir that have not been marked
        # as artifacts.
        self.artifacts = []
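
# For reference, a minimal standalone sketch of the connection logic used in
# initialize() above (assuming pymongo is installed and a MongoDB instance is
# running locally; 'wa' is the default database name):
import pymongo
from gridfs import GridFS

client = pymongo.MongoClient('localhost', 27017)
dbc = client['wa']            # select the database used to store WA results
fs = GridFS(dbc)              # GridFS store for uploaded artifacts
run_dbid = dbc.runs.insert({'project': 'example'})  # returns the new ObjectId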
Ejemplo n.º 22
0
class FpsInstrument(Instrument):

    name = 'fps'
    description = """
    Measures Frames Per Second (FPS) and associated metrics for a workload.

    .. note:: This instrument depends on pandas Python library (which is not part of standard
              WA dependencies), so you will need to install that first, before you can use it.

    Android L and below use SurfaceFlinger to calculate the FPS data.
    Android M and above use gfxinfo to calculate the FPS data.

    SurfaceFlinger:
    The view is specified by the workload as ``view`` attribute. This defaults
    to ``'SurfaceView'`` for game workloads, and ``None`` for non-game
    workloads (as for them FPS measurement usually doesn't make sense).
    Individual workloads may override this.

    gfxinfo:
    The view is specified by the workload as ``package`` attribute.
    This is because gfxinfo already processes for all views in a package.

    This instrument adds four metrics to the results:

        :FPS: Frames Per Second. This is the frame rate of the workload.
        :frame_count: The total number of frames rendered during the execution of
                 the workload.
        :janks: The number of "janks" that occurred during execution of the
                workload. Janks are sudden shifts in frame rate. They result
                in a "stuttery" UI. See http://jankfree.org/jank-busters-io
        :not_at_vsync: The number of frames that did not render in a single
                       vsync cycle.

    """
    supported_platforms = ['android']

    parameters = [
        Parameter(
            'drop_threshold',
            kind=numeric,
            default=5,
            description='Data points below this FPS will be dropped as they '
            'do not constitute "real" gameplay. The assumption '
            'being that while actually running, the FPS in the '
            'game will not drop below X frames per second, '
            'except on loading screens, menus, etc, which '
            'should not contribute to FPS calculation. '),
        Parameter(
            'keep_raw',
            kind=boolean,
            default=False,
            description=
            'If set to ``True``, this will keep the raw dumpsys output '
            'in the results directory (this is mainly used for debugging). '
            'Note: frames.csv with collected frames data will always be '
            'generated regardless of this setting.'),
        Parameter('generate_csv',
                  kind=boolean,
                  default=True,
                  description=
                  'If set to ``True``, this will produce temporal fps data '
                  'in the results directory, in a file named fps.csv. '
                  'Note: fps data will appear as discrete step-like values; '
                  'in order to produce a more meaningful representation, '
                  'a rolling mean can be applied.'),
        Parameter('crash_check',
                  kind=boolean,
                  default=True,
                  description="""
                  Specifies whether the instrument should check for crashed content by examining
                  frame data. If this is set, the ``execution_time`` instrument must also be
                  installed. The check is performed by using the measured FPS and execution time
                  to estimate the expected frame count and comparing that against the measured
                  frame count. If the ratio of measured/expected frames is too low, it is assumed
                  that the content crashed part way through the run. What counts as "too low" is
                  determined by ``crash_threshold``.

                  .. note:: This is not 100\% fool-proof. If the crash occurs sufficiently close to
                            workload's termination,  it may not be detected. If this is expected, the
                            threshold may be adjusted up to compensate.
                  """),
        Parameter('crash_threshold',
                  kind=float,
                  default=0.7,
                  description="""
                  Specifies the threshold used to decide whether a measured/expected frame ratio
                  indicates a content crash. E.g. a value of ``0.75`` means that if the measured
                  frame count is more than a quarter lower than expected, it will be treated as
                  a content crash.
                  """),
        Parameter('dumpsys_period',
                  kind=float,
                  default=2,
                  constraint=lambda x: x > 0,
                  description="""
                  Specifies the time period between calls to ``dumpsys SurfaceFlinger --latency`` in
                  seconds when collecting frame data. Using a lower value improves the granularity
                  of timings when recording actions that take a short time to complete. Note, this
                  will produce duplicate frame data in the raw dumpsys output, however, this is
                  filtered out in frames.csv. It may also affect the overall load on the system.

                  The default value of 2 seconds corresponds with the NUM_FRAME_RECORDS in
                  android/services/surfaceflinger/FrameTracker.h (as of the time of writing
                  currently 128) and a frame rate of 60 fps that is applicable to most devices.
                  """),
        Parameter('force_surfaceflinger',
                  kind=boolean,
                  default=False,
                  description="""
                  By default, the method to capture fps data is based on Android version.
                  If this is set to true, force the instrument to use the SurfaceFlinger method
                  regardless of its Android version.
                  """),
    ]

    def __init__(self, device, **kwargs):
        super(FpsInstrument, self).__init__(device, **kwargs)
        self.collector = None
        self.outfile = None
        self.fps_outfile = None
        self.is_enabled = True
        self.fps_method = ''

    def validate(self):
        if not pd or LooseVersion(pd.__version__) < LooseVersion('0.13.1'):
            message = (
                'fps instrument requires pandas Python package (version 0.13.1 or higher) to be installed.\n'
                'You can install it with pip, e.g. "sudo pip install pandas"')
            raise InstrumentError(message)
        if self.crash_check and not instrument_is_installed('execution_time'):
            raise ConfigError(
                'execution_time instrument must be installed in order to check for content crash.'
            )

    def setup(self, context):
        workload = context.workload
        if hasattr(workload, 'view'):
            self.fps_outfile = os.path.join(context.output_directory,
                                            'fps.csv')
            self.outfile = os.path.join(context.output_directory, 'frames.csv')
            # Android M brings a new method of collecting FPS data
            if not self.force_surfaceflinger and (
                    self.device.get_sdk_version() >= 23):
                # gfxinfo takes in the package name rather than a single view/activity
                # so there is no 'list_command' to run and compare against a list of
                # views/activities. Additionally, clearing the stats requires the package
                # so we need to clear for every package in the workload.
                # Usually there is only one package, but some workloads may run multiple
                # packages so each one must be reset before continuing
                self.fps_method = 'gfxinfo'
                runcmd = 'dumpsys gfxinfo {} framestats'
                lstcmd = None
                params = workload.package
                params = [params] if isinstance(params, basestring) else params
                for pkg in params:
                    self.device.execute('dumpsys gfxinfo {} reset'.format(pkg))
            else:
                self.fps_method = 'surfaceflinger'
                runcmd = 'dumpsys SurfaceFlinger --latency {}'
                lstcmd = 'dumpsys SurfaceFlinger --list'
                params = workload.view
                self.device.execute('dumpsys SurfaceFlinger --latency-clear ')

            self.collector = LatencyCollector(self.outfile, self.device, params
                                              or '', self.keep_raw,
                                              self.logger, self.dumpsys_period,
                                              runcmd, lstcmd, self.fps_method)
        else:
            self.logger.debug('Workload does not contain a view; disabling...')
            self.is_enabled = False

    def start(self, context):
        if self.is_enabled:
            self.logger.debug('Starting Frame Statistics collection...')
            self.collector.start()

    def stop(self, context):
        if self.is_enabled and self.collector.is_alive():
            self.logger.debug('Stopping Frame Statistics collection...')
            self.collector.stop()

    def update_result(self, context):
        if self.is_enabled:
            fps, frame_count, janks, not_at_vsync = float('nan'), 0, 0, 0
            p90, p95, p99 = [float('nan')] * 3
            data = pd.read_csv(self.outfile)
            if not data.empty:  # pylint: disable=maybe-no-member
                # gfxinfo method has an additional file generated that contains statistics
                stats_file = None
                if self.fps_method == 'gfxinfo':
                    stats_file = os.path.join(os.path.dirname(self.outfile),
                                              'gfxinfo.csv')
                fp = FpsProcessor(data, extra_data=stats_file)
                per_frame_fps, metrics = fp.process(
                    self.collector.refresh_period, self.drop_threshold)
                fps, frame_count, janks, not_at_vsync = metrics

                if self.generate_csv:
                    per_frame_fps.to_csv(self.fps_outfile,
                                         index=False,
                                         header=True)
                    context.add_artifact('fps', path='fps.csv', kind='data')

                p90, p95, p99 = fp.percentiles()

            context.result.add_metric('FPS', fps)
            context.result.add_metric('frame_count', frame_count)
            context.result.add_metric('janks', janks, lower_is_better=True)
            context.result.add_metric('not_at_vsync',
                                      not_at_vsync,
                                      lower_is_better=True)
            context.result.add_metric('frame_time_90percentile',
                                      p90,
                                      'ms',
                                      lower_is_better=True)
            context.result.add_metric('frame_time_95percentile',
                                      p95,
                                      'ms',
                                      lower_is_better=True)
            context.result.add_metric('frame_time_99percentile',
                                      p99,
                                      'ms',
                                      lower_is_better=True)

    def slow_update_result(self, context):
        result = context.result
        if self.crash_check and result.has_metric('execution_time'):
            self.logger.debug('Checking for crashed content.')
            exec_time = result['execution_time'].value
            fps = result['FPS'].value
            frames = result['frame_count'].value
            if all([exec_time, fps, frames]):
                expected_frames = fps * exec_time
                ratio = frames / expected_frames
                self.logger.debug(
                    'actual/expected frames: {:.2}'.format(ratio))
                if ratio < self.crash_threshold:
                    self.logger.error(
                        'Content for {} appears to have crashed.'.format(
                            context.spec.label))
                    result.status = IterationResult.FAILED
                    result.add_event(
                        'Content crash detected (actual/expected frames: {:.2}).'
                        .format(ratio))
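
# A worked example of the crash-check heuristic in slow_update_result() above
# (values are illustrative):
exec_time = 30.0                       # seconds, from the execution_time instrument
fps = 55.0                             # measured FPS
frames = 990                           # measured frame count
expected_frames = fps * exec_time      # 1650.0 frames expected
ratio = frames / expected_frames       # 0.6
content_crashed = ratio < 0.7          # below the default crash_threshold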
Ejemplo n.º 23
0
class RemoteFilerGetter(ResourceGetter):

    name = 'filer_assets'
    description = """
    Finds resources on a (locally mounted) remote filer and caches them locally.

    This assumes that the filer is mounted on the local machine (e.g. as a samba share).
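
    Example config.py settings (a sketch, using the global aliases defined
    below)::

        remote_assets_path = '/mnt/filer/wa-assets'
        always_fetch_remote_assets = False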

    """
    priority = GetterPriority.remote
    resource_type = ['apk', 'file', 'jar', 'revent']

    parameters = [
        Parameter(
            'remote_path',
            global_alias='remote_assets_path',
            default='',
            description=
            """Path, on the local system, where the assets are located."""),
        Parameter(
            'always_fetch',
            kind=boolean,
            default=False,
            global_alias='always_fetch_remote_assets',
            description=
            """If ``True``, will always attempt to fetch assets from the remote, even if
                                 a local cached copy is available."""),
    ]

    def get(self, resource, **kwargs):
        version = kwargs.get('version')
        if resource.owner:
            remote_path = os.path.join(self.remote_path, resource.owner.name)
            local_path = os.path.join(settings.environment_root, '__filer',
                                      resource.owner.dependencies_directory)
            message = 'resource={}, version={}, remote_path={}, local_path={}'
            self.logger.debug(
                message.format(resource, version, remote_path, local_path))
            return self.try_get_resource(resource, version, remote_path,
                                         local_path)
        else:
            result = None
            for entry in os.listdir(self.remote_path):
                remote_path = os.path.join(self.remote_path, entry)
                local_path = os.path.join(settings.environment_root, '__filer',
                                          settings.dependencies_directory,
                                          entry)
                result = self.try_get_resource(resource, version, remote_path,
                                               local_path)
                if result:
                    break
            return result

    def try_get_resource(self, resource, version, remote_path, local_path):
        if not self.always_fetch:
            result = self.get_from(resource, version, local_path)
            if result:
                return result
        if remote_path:
            # Didn't find it cached locally; now check the remote
            result = self.get_from(resource, version, remote_path)
            if not result:
                return result
        else:  # remote path is not set
            return None
        # Found it remotely, cache locally, then return it
        local_full_path = os.path.join(_d(local_path),
                                       os.path.basename(result))
        self.logger.debug('cp {} {}'.format(result, local_full_path))
        shutil.copy(result, local_full_path)
        return local_full_path

    def get_from(self, resource, version, location):  # pylint: disable=no-self-use
        # pylint: disable=too-many-branches
        if resource.name in ['apk', 'jar']:
            return get_from_location_by_extension(resource, location,
                                                  resource.name, version)
        elif resource.name == 'file':
            filepath = os.path.join(location, resource.path)
            if os.path.exists(filepath):
                return filepath
        elif resource.name == 'revent':
            device_model = resource.owner.device.get_device_model()
            wa_device_name = resource.owner.device.name
            for name in [device_model, wa_device_name]:
                if not name:
                    continue
                filename = '.'.join([name, resource.stage, 'revent']).lower()
                alternate_location = os.path.join(location, 'revent_files')
                # There tends to be some confusion as to where revent files should
                # be placed. This looks both in the extension's directory, and in
                # 'revent_files' subdirectory under it, if it exists.
                path = None
                if os.path.isdir(alternate_location):
                    for candidate in os.listdir(alternate_location):
                        if candidate.lower() == filename.lower():
                            path = os.path.join(alternate_location, candidate)
                if os.path.isdir(location):
                    for candidate in os.listdir(location):
                        if candidate.lower() == filename.lower():
                            path = os.path.join(location, candidate)
                if path:
                    try:
                        ReventRecording(path).close()  # Check valid recording
                        return path
                    except ValueError as e:
                        self.logger.warning(e.message)

        else:
            raise ValueError('Unexpected resource type: {}'.format(
                resource.name))
Ejemplo n.º 24
0
class CpuStatesProcessor(ResultProcessor):

    name = 'cpustates'
    description = '''
    Process power ftrace to produce CPU state and parallelism stats.

    Parses trace-cmd output to extract power events and uses those to generate
    statistics about parallelism and frequency/idle core residency.

    .. note:: trace-cmd instrument must be enabled and configured to collect
              at least ``power:cpu_idle`` and ``power:cpu_frequency`` events.
              Reporting should also be enabled (it is by default) as
              ``cpustate`` parses the text version of the trace.
              Finally, the device should have ``cpuidle`` module installed.

    This generates two reports for the run:

    *parallel.csv*

    Shows what percentage of time was spent with N cores active (for N
    from 0 to the total number of cores), for a cluster or for a system as
    a whole. It contains the following columns:

        :workload: The workload label
        :iteration: iteration that was run
        :cluster: The cluster for which statistics are reported. The value of
                  ``"all"`` indicates that this row reports statistics for
                  the whole system.
        :number_of_cores: number of cores active. ``0`` indicates the cluster
                          was idle.
        :total_time: Total time spent in this state during workload execution
        :%time: Percentage of total workload execution time spent in this state
        :%running_time: Percentage of the time the cluster was active (i.e.
                        ignoring time the cluster was idling) spent in this
                        state.

    *cpustate.csv*

    Shows the percentage of time a core spent in a particular power state. The first
    column names the state and is followed by a column for each core. Power states
    include available DVFS frequencies (for heterogeneous systems, this is the union
    of frequencies supported by the different core types) and idle states. Some
    shallow states (e.g. ARM WFI) will consume different amounts of power depending
    on the current OPP. For such states, there will be an entry for each OPP.
    ``"unknown"`` indicates the percentage of time for which a state could not be
    established from the trace. This is usually due to the core state being unknown
    at the beginning of the trace, but may also be caused by dropped events in the
    middle of the trace.

    '''

    parameters = [
        Parameter('first_cluster_state', kind=int, default=2,
                  description="""
                  The first idle state which is common to a cluster.
                  """),
        Parameter('first_system_state', kind=int, default=3,
                  description="""
                  The first idle state which is common to all cores.
                  """),
        Parameter('write_iteration_reports', kind=bool, default=False,
                  description="""
                  By default, this instrument will generate reports for the entire run
                  in the overall output directory. Enabling this option will, in addition,
                  create reports in each iteration's output directory. The formats of these
                  reports will be similar to the overall report, except they won't mention
                  the workload name or iteration number (as that is implied by their location).
                  """),
        Parameter('use_ratios', kind=bool, default=False,
                  description="""
                  By default proportional values will be reported as percentages, if this
                  flag is enabled, they will be reported as ratios instead.
                  """),
        Parameter('create_timeline', kind=bool, default=True,
                  description="""
                  Create a CSV with the timeline of core power states over the course of the run
                  as well as the usual stats reports.
                  """),
        Parameter('create_utilization_timeline', kind=bool, default=False,
                  description="""
                  Create a CSV with the timeline of cpu(s) utilisation over the course of the run
                  as well as the usual stats reports.
                  The values generated are floating point numbers, normalised based on the maximum
                  frequency of the cluster.
                  """),
    ]

    def validate(self):
        if not instrument_is_installed('trace-cmd'):
            message = '''
            {} requires "trace-cmd" instrument to be installed and the collection of at
            least "power:cpu_frequency" and "power:cpu_idle" events to be enabled during worklad
            execution.
            '''
            raise ConfigError(message.format(self.name).strip())

    def initialize(self, context):
        # pylint: disable=attribute-defined-outside-init
        device = context.device
        for modname in ['cpuidle', 'cpufreq']:
            if not device.has(modname):
                message = 'Device does not appear to have {} capability; is the right module installed?'
                raise ConfigError(message.format(modname))
        if not device.core_names:
            message = '{} requires"core_names" and "core_clusters" to be specified for the device.'
            raise ConfigError(message.format(self.name))
        self.core_names = device.core_names
        self.core_clusters = device.core_clusters
        idle_states = {s.id: s.desc for s in device.get_cpuidle_states()}
        self.idle_state_names = [idle_states[i] for i in sorted(idle_states.keys())]
        self.num_idle_states = len(self.idle_state_names)
        self.iteration_reports = OrderedDict()
        self.max_freq_list = []
        # priority -19: just higher than the slow_start of instrumentation
        signal.connect(self.set_initial_state, signal.BEFORE_WORKLOAD_EXECUTION, priority=-19)

    def set_initial_state(self, context):
        # TODO: this does not play well with hotplug but leaving as-is, as this will be changed with
        # the devilib port anyway.
        # Write initial frequencies into the trace.
        # NOTE: this assumes per-cluster DVFS, that is valid for devices that
        # currently exist. This will need to be updated for per-CPU DFS.
        # pylint: disable=attribute-defined-outside-init
        self.logger.debug('Writing initial frequencies into trace...')
        device = context.device
        cluster_freqs = {}
        cluster_max_freqs = {}
        self.max_freq_list = []
        for c in unique(device.core_clusters):
            cluster_freqs[c] = device.get_cluster_cur_frequency(c)
            cluster_max_freqs[c] = device.get_cluster_max_frequency(c)
        for i, c in enumerate(device.core_clusters):
            self.max_freq_list.append(cluster_max_freqs[c])
            entry = 'CPU {} FREQUENCY: {} kHZ'.format(i, cluster_freqs[c])
            device.set_sysfile_value('/sys/kernel/debug/tracing/trace_marker',
                                     entry, verify=False)

        # Nudge each cpu to force idle state transitions in the trace
        self.logger.debug('Nudging all cores awake...')
        for i in xrange(len(device.core_names)):
            command = device.busybox + ' taskset 0x{:x} {}'
            device.execute(command.format(1 << i, 'ls'))

    def process_iteration_result(self, result, context):
        trace = context.get_artifact('txttrace')
        if not trace:
            self.logger.debug('Text trace does not appear to have been generated; skipping this iteration.')
            return
        self.logger.debug('Generating power state reports from trace...')
        if self.create_timeline:
            timeline_csv_file = os.path.join(context.output_directory, 'power_states.csv')
        else:
            timeline_csv_file = None
        if self.create_utilization_timeline:
            cpu_utilisation = os.path.join(context.output_directory, 'cpu_utilisation.csv')
        else:
            cpu_utilisation = None
        parallel_report, powerstate_report = report_power_stats(  # pylint: disable=unbalanced-tuple-unpacking
            trace_file=trace.path,
            idle_state_names=self.idle_state_names,
            core_names=self.core_names,
            core_clusters=self.core_clusters,
            num_idle_states=self.num_idle_states,
            first_cluster_state=self.first_cluster_state,
            first_system_state=self.first_system_state,
            use_ratios=self.use_ratios,
            timeline_csv_file=timeline_csv_file,
            cpu_utilisation=cpu_utilisation,
            max_freq_list=self.max_freq_list,
        )
        if parallel_report is None:
            self.logger.warning('No power state reports generated; are power '
                                'events enabled in the trace?')
            return
        else:
            self.logger.debug('Reports generated.')

        iteration_id = (context.spec.id, context.spec.label, context.current_iteration)
        self.iteration_reports[iteration_id] = (parallel_report, powerstate_report)
        if self.write_iteration_reports:
            self.logger.debug('Writing iteration reports')
            parallel_report.write(os.path.join(context.output_directory, 'parallel.csv'))
            powerstate_report.write(os.path.join(context.output_directory, 'cpustates.csv'))

    def process_run_result(self, result, context):  # pylint: disable=too-many-locals
        if not self.iteration_reports:
            self.logger.warning('No power state reports generated.')
            return

        parallel_rows = []
        powerstate_rows = []
        for iteration_id, reports in self.iteration_reports.iteritems():
            spec_id, workload, iteration = iteration_id
            parallel_report, powerstate_report = reports
            for record in parallel_report.values:
                parallel_rows.append([spec_id, workload, iteration] + record)
            for state in sorted(powerstate_report.state_stats):
                stats = powerstate_report.state_stats[state]
                powerstate_rows.append([spec_id, workload, iteration, state] +
                                       ['{:.3f}'.format(s if s is not None else 0)
                                           for s in stats])

        with open(os.path.join(context.output_directory, 'parallel.csv'), 'w') as wfh:
            writer = csv.writer(wfh)
            writer.writerow(['id', 'workload', 'iteration', 'cluster',
                             'number_of_cores', 'total_time',
                             '%time', '%running_time'])
            writer.writerows(parallel_rows)

        with open(os.path.join(context.output_directory, 'cpustate.csv'), 'w') as wfh:
            writer = csv.writer(wfh)
            headers = ['id', 'workload', 'iteration', 'state']
            headers += ['{} CPU{}'.format(c, i)
                        for i, c in enumerate(powerstate_report.core_names)]
            writer.writerow(headers)
            writer.writerows(powerstate_rows)
Ejemplo n.º 25
0
class Googleplaybooks(AndroidUxPerfWorkload):

    name = 'googleplaybooks'
    package = 'com.google.android.apps.books'
    min_apk_version = '3.9.37'
    activity = 'com.google.android.apps.books.app.BooksActivity'
    view = [
        package + '/com.google.android.apps.books.app.HomeActivity', package +
        '/com.android.vending/com.google.android.finsky.activities.MainActivity',
        package + '/com.google.android.apps.books.app.ReadingActivity',
        package +
        '/com.google.android.apps.books.app.TableOfContentsActivityLight'
    ]
    description = '''
    A workload to perform standard productivity tasks with googleplaybooks.
    This workload performs various tasks, such as searching for a book title
    online, browsing through a book, adding and removing notes, word searching,
    and querying information about the book.

    Test description:
     1. Open Google Play Books application
     2. Dismisses sync operation (if applicable)
     3. Searches for a book title
     4. Adds books to library if not already present
     5. Opens 'My Library' contents
     6. Opens the selected book
     7. Gestures are performed to swipe between pages and pinch zoom in and out of a page
     8. Selects a specified chapter based on page number from the navigation view
     9. Selects a word in the centre of screen and adds a test note to the page
    10. Removes the test note from the page (clean up)
    11. Searches for the number of occurrences of a common word throughout the book
    12. Switches page styles from 'Day' to 'Night' to 'Sepia' and back to 'Day'
    13. Uses the 'About this book' facility on the currently selected book

    NOTE: This workload requires a network connection (ideally, wifi) to run,
          a Google account to be set up on the device, and payment details for the account.
          Free books require payment details to have been set up, otherwise the workload fails.
          Tip: Install the 'Google Opinion Rewards' app to bypass the need to enter valid
          card/bank details.
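
    Example agenda entry (a sketch; parameter values are illustrative)::

        workloads:
            - name: googleplaybooks
              workload_parameters:
                  search_book_title: 'Nikola Tesla'
                  search_word: 'the'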
    '''

    parameters = [
        Parameter(
            'search_book_title',
            kind=str,
            default=
            'Nikola Tesla: Imagination and the Man That Invented the 20th Century',
            description="""
                  The book title to search for within Google Play Books archive.
                  The book must either be already in the account's library, or free to purchase.
                  """),
        Parameter('library_book_title',
                  kind=str,
                  default='Nikola Tesla',
                  description="""
                  The book title to search for within My Library.
                  The Library name can differ from the Store name (it is usually shorter).
                  If left blank, the ``search_book_title`` will be used.
                  """),
        Parameter('select_chapter_page_number',
                  kind=int,
                  default=4,
                  description="""
                  The Page Number to search for within a selected book's Chapter list.
                  Note: Accepts integers only.
                  """),
        Parameter('search_word',
                  kind=str,
                  default='the',
                  description="""
                  The word to search for within a selected book.
                  Note: Accepts single words only.
                  """),
        Parameter('account',
                  kind=str,
                  mandatory=False,
                  description="""
                  If you are running this workload on a device which has more than one
                  Google account setup, then this parameter is used to select which account
                  to select when prompted.
                  The account requires the book to have already been purchased or payment details
                  already associated with the account.
                  If omitted, the first account in the list will be selected if prompted.
                  """),
    ]

    # This workload relies on the internet so check that there is a working
    # internet connection
    requires_network = True

    def validate(self):
        super(Googleplaybooks, self).validate()
        self.uiauto_params[
            'search_book_title'] = self.search_book_title.replace(
                ' ', '0space0')
        # If library_book_title is blank, set it to the same as search_book_title
        if not self.library_book_title:  # pylint: disable=access-member-before-definition
            self.library_book_title = self.search_book_title  # pylint: disable=attribute-defined-outside-init
        self.uiauto_params[
            'library_book_title'] = self.library_book_title.replace(
                ' ', '0space0')
        self.uiauto_params[
            'chapter_page_number'] = self.select_chapter_page_number
        self.uiauto_params['search_word'] = self.search_word
        self.uiauto_params['account'] = self.account
Ejemplo n.º 26
0
class RtApp(Workload):
    # pylint: disable=no-member,attribute-defined-outside-init

    name = 'rt-app'
    description = """
    A test application that simulates configurable real-time periodic load.

    rt-app is a test application that starts multiple periodic threads in order to
    simulate a real-time periodic load. It supports SCHED_OTHER, SCHED_FIFO,
    SCHED_RR as well as the AQuoSA framework and SCHED_DEADLINE.

    The load is described using JSON-like config files. Below are a couple of simple
    examples.


    Simple use case which creates a thread that runs for 20ms then sleeps for
    80ms (run/sleep values are in microseconds) until the use case is stopped
    with Ctrl+C:

    .. code-block:: json

        {
            "tasks" : {
                "thread0" : {
                    "loop" : -1,
                    "run" :   20000,
                    "sleep" : 80000
                }
            },
            "global" : {
                "duration" : 2,
                "calibration" : "CPU0",
                "default_policy" : "SCHED_OTHER",
                "pi_enabled" : false,
                "lock_pages" : false,
                "logdir" : "./",
                "log_basename" : "rt-app1",
                "ftrace" : false,
                "gnuplot" : true,
            }
        }


    Simple use case with two threads that run for 10ms and wake each
    other up until the use case is stopped with Ctrl+C:

    .. code-block:: json

        {
            "tasks" : {
                "thread0" : {
                    "loop" : -1,
                    "run" :     10000,
                    "resume" : "thread1",
                    "suspend" : "thread0"
                },
                "thread1" : {
                    "loop" : -1,
                    "run" :     10000,
                    "resume" : "thread0",
                    "suspend" : "thread1"
                }
            }
        }

    Please refer to the existing configs in ``$WA_ROOT/wlauto/workloads/rt_app/use_case``
    for more examples.

    The version of rt-app currently used with this workload contains enhancements and
    modifications done by Linaro. The source code for this version may be obtained here:

    http://git.linaro.org/power/rt-app.git

    The upstream version of rt-app is hosted here:

    https://github.com/scheduler-tools/rt-app

    """

    parameters = [
        Parameter('config', kind=str, default='taskset',
                  description='''
                  Use case configuration file to run with rt-app. This may be
                  either the name of one of the "standard" configurations included
                  with the workload, or a path to a custom JSON file provided by
                  the user. Either way, the ".json" extension is implied and will
                  be added automatically if not specified in the argument.

                  The following is the list of standard configurations currently
                  included with the workload: {}

                  '''.format(', '.join(os.listdir(PACKAGED_USE_CASE_DIRECTORY)))),
        Parameter('duration', kind=int,
                  description='''
                  Duration of the workload execution in seconds. If specified, this
                  will override the corresponding parameter in the JSON config.
                  '''),
        Parameter('taskset_mask', kind=int,
                  description='Constrain execution to specific CPUs.'),
        Parameter('uninstall_on_exit', kind=bool, default=False,
                  description="""
                  If set to ``True``, rt-app binary will be uninstalled from the device
                  at the end of the run.
                  """),
        Parameter('force_install', kind=bool, default=False,
                  description="""
                  If set to ``True``, rt-app binary will always be deployed to the
                  target device at the beginning of the run, regardless of whether it
                  was already installed there.
                  """),
    ]

    def initialize(self, context):
        # initialize() runs once per run. Class attributes are set here to
        # make them available to other instances of the workload.
        RtApp.device_working_directory = self.device.path.join(self.device.working_directory,
                                                               'rt-app-working')
        RtApp.host_binary = context.resolver.get(Executable(self,
                                                            self.device.abi,
                                                            BINARY_NAME), strict=False)
        RtApp.workgen_script = context.resolver.get(File(self, 'workgen'))
        if not self.device.is_rooted:  # some use cases require root privileges
            raise WorkloadError('rt-app requires the device to be rooted.')
        self.device.execute('mkdir -p {}'.format(self.device_working_directory))
        self._deploy_rt_app_binary_if_necessary()

    def setup(self, context):
        self.log_basename = context.spec.label
        self.host_json_config = self._load_json_config(context)
        self.config_file_on_device = self.device.path.join(self.device_working_directory,
                                                           os.path.basename(self.host_json_config))
        self.device.push_file(self.host_json_config, self.config_file_on_device, timeout=60)
        self.command = '{} {}'.format(self.device_binary, self.config_file_on_device)

        time_buffer = 30
        self.timeout = self.duration + time_buffer

    def run(self, context):
        self.output = self.device.invoke(self.command,
                                         on_cpus=self.taskset_mask,
                                         timeout=self.timeout,
                                         as_root=True)

    def update_result(self, context):
        self._pull_rt_app_logs(context)
        context.result.classifiers.update(dict(
            duration=self.duration,
            task_count=self.task_count,
        ))

        outfile = os.path.join(context.output_directory, RAW_OUTPUT_FILENAME)
        with open(outfile, 'w') as wfh:
            wfh.write(self.output)

        error_count = 0
        crit_count = 0
        for line in self.output.split('\n'):
            match = PLOAD_REGEX.search(line)
            if match:
                pload_value = match.group(1)
                pload_unit = match.group(2)
                calib_cpu_value = match.group(3)
                context.result.add_metric('pLoad', float(pload_value), pload_unit)
                context.result.add_metric('calib_cpu', float(calib_cpu_value))

            error_match = ERROR_REGEX.search(line)
            if error_match:
                error_count += 1

            crit_match = CRIT_REGEX.search(line)
            if crit_match:
                crit_count += 1

        context.result.add_metric('error_count', error_count, 'count')
        context.result.add_metric('crit_count', crit_count, 'count')

    def finalize(self, context):
        if self.uninstall_on_exit:
            self.device.uninstall(self.device_binary)
        self.device.execute('rm -rf {}'.format(self.device_working_directory))

    def _deploy_rt_app_binary_if_necessary(self):
        # called from initialize() so gets invoked once per run
        RtApp.device_binary = self.device.get_binary_path("rt-app")
        if self.force_install or not RtApp.device_binary:
            if not self.host_binary:
                message = '''rt-app is not installed on the device and could not be
                             found in workload resources'''
                raise ResourceError(message)
            RtApp.device_binary = self.device.install(self.host_binary)

    def _load_json_config(self, context):
        user_config_file = self._get_raw_json_config(context.resolver)
        config_file = self._generate_workgen_config(user_config_file,
                                                    context.output_directory)
        with open(config_file) as fh:
            config_data = json.load(fh, object_pairs_hook=OrderedDict)
        self._update_rt_app_config(config_data)
        self.duration = config_data['global'].get('duration', 0)
        self.task_count = len(config_data.get('tasks', []))
        with open(config_file, 'w') as wfh:
            json.dump(config_data, wfh, indent=4)
        return config_file

    def _get_raw_json_config(self, resolver):
        if os.path.splitext(self.config)[1] != '.json':
            self.config += '.json'
        if os.path.isfile(self.config):
            return os.path.abspath(self.config)
        partial_path = os.path.join('use_cases', self.config)
        return resolver.get(File(self, partial_path))

    def _generate_workgen_config(self, user_file, output_directory):
        output_file = os.path.join(output_directory, 'unkind.json')
        # use workgen dry run option to generate a use case
        # file with proper JSON grammar on host first
        try:
            check_output('python {} -d -o {} {}'.format(self.workgen_script,
                                                        output_file,
                                                        user_file),
                         shell=True)
        except CalledProcessError as e:
            message = 'Could not generate config using workgen, got "{}"'
            raise WorkloadError(message.format(e))
        return output_file

    def _update_rt_app_config(self, config_data):
        config_data['global'] = config_data.get('global', {})
        config_data['global']['logdir'] = self.device_working_directory
        config_data['global']['log_basename'] = self.log_basename
        if self.duration is not None:
            config_data['global']['duration'] = self.duration

    def _pull_rt_app_logs(self, context):
        tar_command = '{} tar czf {}/{} -C {} .'.format(self.device.busybox,
                                                        self.device_working_directory,
                                                        TARBALL_FILENAME,
                                                        self.device_working_directory)
        self.device.execute(tar_command, timeout=300)
        device_path = self.device.path.join(self.device_working_directory, TARBALL_FILENAME)
        host_path = os.path.join(context.output_directory, TARBALL_FILENAME)
        self.device.pull_file(device_path, host_path, timeout=120)
        with tarfile.open(host_path, 'r:gz') as tf:
            tf.extractall(context.output_directory)
        os.remove(host_path)
        self.device.execute('rm -rf {}/*'.format(self.device_working_directory))
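
The interplay between _load_json_config() and _update_rt_app_config() above amounts to merging WA-level settings into the "global" section of the user's rt-app JSON before it is pushed to the device. A minimal standalone sketch of that merge, assuming the config layout shown in the class description (the logdir path and values below are illustrative only):

import json
from collections import OrderedDict

def update_rt_app_config(config_data, logdir, log_basename, duration=None):
    # WA-level settings overwrite the 'global' section of the user config,
    # mirroring _update_rt_app_config() above.
    config_data['global'] = config_data.get('global', OrderedDict())
    config_data['global']['logdir'] = logdir
    config_data['global']['log_basename'] = log_basename
    if duration is not None:
        config_data['global']['duration'] = duration
    return config_data

user_config = OrderedDict(
    tasks=OrderedDict(thread0={'loop': -1, 'run': 20000, 'sleep': 80000}))
merged = update_rt_app_config(user_config, '/data/local/rt-app-working',
                              'rt-app1', duration=2)
print(json.dumps(merged, indent=4))
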
Example No. 27
class Antutu(AndroidUiAutoBenchmark):

    name = 'antutu'
    description = """
    AnTuTu Benchmark is a benchmarking tool for Android mobile phones and
    tablets. It can run a full test of a key project through "Memory
    Performance", "CPU Integer Performance", "CPU Floating point Performance",
    "2D/3D Graphics Performance", "SD card reading/writing speed", and
    "Database IO" performance testing, and gives an accurate analysis for
    Android smartphones.

    http://www.antutulabs.com/AnTuTu-Benchmark

    From the website:

    AnTuTu Benchmark supports the latest quad-core CPUs. By producing overall
    and individual scores for the hardware, AnTuTu Benchmark can judge a
    phone by the performance of its hardware. By uploading the scores, you
    can view your device in the world rankings and see how its hardware
    performance compares.

    """
    #pylint: disable=E1101

    package = "com.antutu.ABenchMark"
    activity = ".ABenchMarkStart"
    summary_metrics = ['score', 'Overall_Score']

    valid_versions = ['3.3.2', '4.0.3', '5.3.0']

    device_prefs_directory = '/data/data/com.antutu.ABenchMark/shared_prefs'
    device_prefs_file = '/'.join(
        [device_prefs_directory, 'com.antutu.ABenchMark_preferences.xml'])
    local_prefs_directory = os.path.join(os.path.dirname(__file__),
                                         'shared_prefs')

    parameters = [
        Parameter(
            'version',
            allowed_values=valid_versions,
            default=sorted(valid_versions, reverse=True)[0],
            description=
            ('Specify the version of AnTuTu to be run. If not specified, the latest available '
             'version will be used.')),
        Parameter(
            'times',
            kind=int,
            default=1,
            description=
            ('The number of times the benchmark will be executed in a row (i.e. '
             'without going through the full setup/teardown process). Note: this does '
             'not work with versions prior to 4.0.3.')),
        Parameter(
            'enable_sd_tests',
            kind=bool,
            default=False,
            description=
            ('If ``True``, enables SD card tests in pre-version-4 AnTuTu. These tests '
             'were known to cause problems on platforms without an SD card. This parameter '
             'will be ignored on AnTuTu version 4 and higher.')),
    ]

    def __init__(self, device, **kwargs):  # pylint: disable=W0613
        super(Antutu, self).__init__(device, **kwargs)
        self.run_timeout = 6 * 60 * self.times
        self.uiauto_params['version'] = self.version
        self.uiauto_params['times'] = self.times
        self.uiauto_params['enable_sd_tests'] = self.enable_sd_tests

    def update_result(self, context):
        super(Antutu, self).update_result(context)
        with open(self.logcat_log) as fh:
            if self.version == '4.0.3':
                metrics = extract_version4_metrics(fh)
            else:
                metrics = extract_older_version_metrics(fh)
        for key, value in metrics.iteritems():
            key = key.replace(' ', '_')
            context.result.add_metric(key, value)
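
extract_version4_metrics() and extract_older_version_metrics() are defined elsewhere in WA and are not shown here. As an illustration of the kind of logcat scraping they perform, here is a hypothetical sketch; the "ANTUTU RESULT" line format is an assumption for demonstration, not AnTuTu's actual output:

import re

# Hypothetical logcat line format, assumed for illustration only.
SCORE_REGEX = re.compile(r'ANTUTU RESULT: (?P<name>[\w ]+): (?P<score>\d+)')

def extract_metrics(fh):
    """Scrape name/score pairs from an iterable of logcat lines."""
    metrics = {}
    for line in fh:
        match = SCORE_REGEX.search(line)
        if match:
            metrics[match.group('name').strip()] = int(match.group('score'))
    return metrics

lines = ['I/Benchmark( 1234): ANTUTU RESULT: Overall Score: 35000']
print(extract_metrics(lines))  # {'Overall Score': 35000}
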
Example No. 28
class EnergyProbe(Instrument):

    name = 'energy_probe'
    description = """Collects power traces using the ARM energy probe.

                     This instrument requires the ``caiman`` utility to be installed on the
                     workload automation host and to be in the PATH. Caiman is part of DS-5
                     and should be in ``/path/to/DS-5/bin/``. The energy probe can
                     simultaneously collect energy from up to 3 power rails.

                     To connect the energy probe to a rail, connect the white wire to the pin
                     that is closer to the voltage source and the black wire to the pin that
                     is closer to the load (the SoC or the device you are probing). Between
                     the pins there should be a shunt resistor of known resistance in the
                     range of 5 to 20 mOhm. The resistances of the shunt resistors are
                     specified via the mandatory ``resistor_values`` parameter.

                     .. note:: This instrument can process results a lot faster if python
                               pandas is installed.
                    """

    parameters = [
        Parameter(
            'resistor_values',
            kind=list_of_numbers,
            default=[],
            description=
            """The value of shunt resistors. This is a mandatory parameter."""
        ),
        Parameter(
            'labels',
            kind=list,
            default=[],
            description="""Meaningful labels for each of the monitored rails."""
        ),
        Parameter(
            'device_entry',
            kind=str,
            default='/dev/ttyACM0',
            description=
            """Path to /dev entry for the energy probe (it should be /dev/ttyACMx)"""
        ),
    ]

    MAX_CHANNELS = 3

    def __init__(self, device, **kwargs):
        super(EnergyProbe, self).__init__(device, **kwargs)
        self.attributes_per_sample = 3
        self.bytes_per_sample = self.attributes_per_sample * 4
        self.attributes = ['power', 'voltage', 'current']
        for i, val in enumerate(self.resistor_values):
            self.resistor_values[i] = int(1000 * float(val))

    def validate(self):
        if subprocess.call('which caiman', stdout=subprocess.PIPE, shell=True):
            raise InstrumentError(
                'caiman not in PATH. Cannot enable energy probe')
        if not self.resistor_values:
            raise ConfigError('At least one resistor value must be specified')
        if len(self.resistor_values) > self.MAX_CHANNELS:
            raise ConfigError(
                '{} channels were specified but the Energy Probe supports up to {}'
                .format(len(self.resistor_values), self.MAX_CHANNELS))
        if pandas is None:
            self.logger.warning(
                "installing the pandas package will significantly speed up this instrument")
            self.logger.warning("to install it try: pip install pandas")

    def setup(self, context):
        if not self.labels:
            self.labels = [
                "PORT_{}".format(channel)
                for channel, _ in enumerate(self.resistor_values)
            ]
        self.output_directory = os.path.join(context.output_directory,
                                             'energy_probe')
        rstring = ""
        for i, rval in enumerate(self.resistor_values):
            rstring += '-r {}:{} '.format(i, rval)
        self.command = 'caiman -d {} -l {} {}'.format(self.device_entry,
                                                      rstring,
                                                      self.output_directory)
        os.makedirs(self.output_directory)

    def start(self, context):
        self.logger.debug(self.command)
        self.caiman = subprocess.Popen(self.command,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       stdin=subprocess.PIPE,
                                       preexec_fn=os.setpgrp,
                                       shell=True)

    def stop(self, context):
        os.killpg(self.caiman.pid, signal.SIGTERM)

    def update_result(self, context):  # pylint: disable=too-many-locals
        num_of_channels = len(self.resistor_values)
        processed_data = [[] for _ in xrange(num_of_channels)]
        filenames = [
            os.path.join(self.output_directory, '{}.csv'.format(label))
            for label in self.labels
        ]
        struct_format = '{}I'.format(num_of_channels *
                                     self.attributes_per_sample)
        not_a_full_row_seen = False
        with open(os.path.join(self.output_directory, "0000000000"),
                  "rb") as bfile:
            while True:
                data = bfile.read(num_of_channels * self.bytes_per_sample)
                if data == '':
                    break
                try:
                    unpacked_data = struct.unpack(struct_format, data)
                except struct.error:
                    if not_a_full_row_seen:
                        self.logger.warn(
                            'possibly misaligned caiman raw data, row contained {} bytes'
                            .format(len(data)))
                    else:
                        not_a_full_row_seen = True
                    # skip the truncated row in both cases; otherwise the loop
                    # below would process stale data from the previous
                    # iteration (or fail on the very first row)
                    continue
                for i in xrange(num_of_channels):
                    index = i * self.attributes_per_sample
                    processed_data[i].append({
                        attr: val
                        for attr, val in zip(
                            self.attributes,
                            unpacked_data[index:index +
                                          self.attributes_per_sample])
                    })
        for i, path in enumerate(filenames):
            with open(path, 'w') as f:
                if pandas is not None:
                    self._pandas_produce_csv(processed_data[i], f)
                else:
                    self._slow_produce_csv(processed_data[i], f)

    # pylint: disable=R0201
    def _pandas_produce_csv(self, data, f):
        dframe = pandas.DataFrame(data)
        dframe = dframe / 1000.0
        dframe.to_csv(f)

    def _slow_produce_csv(self, data, f):
        new_data = []
        for entry in data:
            new_data.append({key: val / 1000.0 for key, val in entry.items()})
        writer = csv.DictWriter(f, self.attributes)
        writer.writeheader()
        writer.writerows(new_data)
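
The binary file parsed in update_result() is a flat stream of unsigned 32-bit integers: one power, voltage and current value per channel per row, which is where bytes_per_sample (3 attributes x 4 bytes) and the '{}I' struct format come from. A self-contained sketch of that row layout, with made-up sample values:

import struct

num_of_channels = 2
attributes = ['power', 'voltage', 'current']
attributes_per_sample = len(attributes)                  # 3
struct_format = '{}I'.format(num_of_channels * attributes_per_sample)
bytes_per_row = struct.calcsize(struct_format)           # 2 * 3 * 4 = 24

# Pack one fake row, then unpack and regroup it per channel, mirroring
# the slicing done in update_result().
row = struct.pack(struct_format, 1000, 5000, 200, 1500, 5000, 300)
assert len(row) == bytes_per_row
values = struct.unpack(struct_format, row)
per_channel = [
    dict(zip(attributes, values[i * attributes_per_sample:
                                (i + 1) * attributes_per_sample]))
    for i in range(num_of_channels)
]
print(per_channel)
# [{'power': 1000, 'voltage': 5000, 'current': 200},
#  {'power': 1500, 'voltage': 5000, 'current': 300}]
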
Example No. 29
class Daq(Instrument):

    name = 'daq'
    description = """
    The DAQ instrument obtains the power consumption of the target device's
    cores, measured by a National Instruments Data Acquisition (DAQ) device.

    WA communicates with a DAQ device server running on a Windows machine
    (Please refer to :ref:`daq_setup`) over a network. You must specify the IP
    address and port the server is listening on in the config file as follows ::

        daq_server_host = '10.1.197.176'
        daq_server_port = 45677

    These values will be output by the server when you run it on Windows.

    You must also specify the values of resistors (in Ohms) across which the
    voltages are measured (Please refer to :ref:`daq_setup`). The values should be
    specified as a list with an entry for each resistor, e.g.::

        daq_resistor_values = [0.005, 0.005]

    In addition to this mandatory configuration, you can also optionally specify the
    following::

        :daq_labels: Labels to be used for ports. Defaults to ``'PORT_<pnum>'``, where
                     'pnum' is the number of the port.
        :daq_device_id: The ID under which the DAQ is registered with the driver.
                        Defaults to ``'Dev1'``.
        :daq_v_range: Specifies the voltage range for the SOC voltage channel on the DAQ
                      (please refer to :ref:`daq_setup` for details). Defaults to ``2.5``.
        :daq_dv_range: Specifies the voltage range for the resistor voltage channel on
                       the DAQ (please refer to :ref:`daq_setup` for details).
                       Defaults to ``0.2``.
        :daq_sampling_rate: DAQ sampling rate. DAQ will take this many samples each
                            second. Please note that this may be limited by your DAQ model
                            and the number of ports you're measuring (again, see
                            :ref:`daq_setup`). Defaults to ``10000``.
        :daq_channel_map: Represents mapping from logical AI channel number to physical
                          connector on the DAQ (varies between DAQ models). The default
                          assumes DAQ 6363 and similar with AI channels on connectors
                          0-7 and 16-23.

    """

    parameters = [
        Parameter(
            'server_host',
            kind=str,
            default='localhost',
            global_alias='daq_server_host',
            description=
            'The host address of the machine that runs the DAQ server with '
            'which the instrument communicates.'),
        Parameter(
            'server_port',
            kind=int,
            default=45677,
            global_alias='daq_server_port',
            description=
            'The port number of the DAQ server with which the daq instrument '
            'communicates.'),
        Parameter('device_id',
                  kind=str,
                  default='Dev1',
                  global_alias='daq_device_id',
                  description=
                  'The ID under which the DAQ is registered with the driver.'),
        Parameter(
            'v_range',
            kind=float,
            default=2.5,
            global_alias='daq_v_range',
            description=
            'Specifies the voltage range for the SOC voltage channel on the DAQ '
            '(please refer to :ref:`daq_setup` for details).'),
        Parameter(
            'dv_range',
            kind=float,
            default=0.2,
            global_alias='daq_dv_range',
            description=
            'Specifies the voltage range for the resistor voltage channel on '
            'the DAQ (please refer to :ref:`daq_setup` for details).'),
        Parameter(
            'sampling_rate',
            kind=int,
            default=10000,
            global_alias='daq_sampling_rate',
            description=
            'DAQ sampling rate. DAQ will take this many samples each '
            'second. Please note that this may be limited by your DAQ model '
            'and the number of ports you\'re measuring (again, see '
            ':ref:`daq_setup`).'),
        Parameter(
            'resistor_values',
            kind=list,
            mandatory=True,
            global_alias='daq_resistor_values',
            description=
            'The values of resistors (in Ohms) across which the voltages are measured on '
            'each port.'),
        Parameter(
            'channel_map',
            kind=list_of_ints,
            default=(0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23),
            global_alias='daq_channel_map',
            description=
            'Represents mapping from logical AI channel number to physical '
            'connector on the DAQ (varies between DAQ models). The default '
            'assumes DAQ 6363 and similar with AI channels on connectors '
            '0-7 and 16-23.'),
        Parameter(
            'labels',
            kind=list_of_strs,
            global_alias='daq_labels',
            description=
            'List of port labels. If specified, the length of the list must match '
            'the length of ``resistor_values``. Defaults to "PORT_<pnum>", where '
            '"pnum" is the number of the port.'),
        Parameter('negative_samples',
                  default='keep',
                  allowed_values=['keep', 'zero', 'drop', 'abs'],
                  global_alias='daq_negative_samples',
                  description="""
                  Specifies how negative power samples should be handled. The following
                  methods are possible:

                    :keep: keep them as they are
                    :zero: turn negative values to zero
                    :drop: drop samples if they contain negative values. *warning:* this may result in
                           port files containing different numbers of samples
                    :abs: take the absolute value of negative samples

                  """),
        Parameter('gpio_sync',
                  kind=int,
                  constraint=lambda x: x > 0,
                  description="""
                  If specified, the instrument will simultaneously set the
                  specified GPIO pin high and put a marker into ftrace. This is
                  to facilitate syncing kernel trace events to the DAQ power
                  trace.
                  """),
    ]

    def initialize(self, context):
        status, devices = self._execute_command('list_devices')
        if status == daq.Status.OK and not devices:
            raise InstrumentError(
                'DAQ: server did not report any devices registered with the driver.'
            )
        self._results = OrderedDict()
        self.gpio_path = None
        if self.gpio_sync:
            if not self.device.file_exists(GPIO_ROOT):
                raise InstrumentError('GPIO sysfs not enabled on the device.')
            try:
                export_path = self.device.path.join(GPIO_ROOT, 'export')
                self.device.set_sysfile_value(export_path,
                                              self.gpio_sync,
                                              verify=False)
                pin_root = self.device.path.join(
                    GPIO_ROOT, 'gpio{}'.format(self.gpio_sync))
                direction_path = self.device.path.join(pin_root, 'direction')
                self.device.set_sysfile_value(direction_path, 'out')
                self.gpio_path = self.device.path.join(pin_root, 'value')
                self.device.set_sysfile_value(self.gpio_path, 0, verify=False)
                signal.connect(self.insert_start_marker,
                               signal.BEFORE_WORKLOAD_EXECUTION,
                               priority=11)
                signal.connect(self.insert_stop_marker,
                               signal.AFTER_WORKLOAD_EXECUTION,
                               priority=11)
            except DeviceError as e:
                raise InstrumentError(
                    'Could not configure GPIO on device: {}'.format(e))

    def setup(self, context):
        self.logger.debug('Initialising session.')
        self._execute_command('configure', config=self.device_config)

    def slow_start(self, context):
        self.logger.debug('Starting collecting measurements.')
        self._execute_command('start')

    def slow_stop(self, context):
        self.logger.debug('Stopping collecting measurements.')
        self._execute_command('stop')

    def update_result(self, context):  # pylint: disable=R0914
        self.logger.debug('Downloading data files.')
        output_directory = _d(os.path.join(context.output_directory, 'daq'))
        self._execute_command('get_data', output_directory=output_directory)
        for entry in os.listdir(output_directory):
            context.add_iteration_artifact(
                'DAQ_{}'.format(os.path.splitext(entry)[0]),
                path=os.path.join('daq', entry),
                kind='data',
                description='DAQ power measurements.')
            port = os.path.splitext(entry)[0]
            path = os.path.join(output_directory, entry)
            key = (context.spec.id, context.spec.label,
                   context.current_iteration)
            if key not in self._results:
                self._results[key] = {}

            temp_file = os.path.join(tempfile.gettempdir(), entry)
            writer, wfh = None, None

            with open(path) as fh:
                if self.negative_samples != 'keep':
                    wfh = open(temp_file, 'wb')
                    writer = csv.writer(wfh)

                reader = csv.reader(fh)
                metrics = reader.next()
                if writer:
                    writer.writerow(metrics)
                self._metrics |= set(metrics)

                rows = _get_rows(reader, writer, self.negative_samples)
                #data = [map(float, d) for d in zip(*rows)]
                data = zip(*rows)

                if writer:
                    wfh.close()
                    shutil.move(temp_file,
                                os.path.join(output_directory, entry))

                n = len(data[0])
                means = [s / n for s in map(sum, data)]
                for metric, value in zip(metrics, means):
                    metric_name = '{}_{}'.format(port, metric)
                    context.result.add_metric(metric_name, round(value, 3),
                                              UNITS[metric])
                    self._results[key][metric_name] = round(value, 3)
                # use a float divisor so Python 2 integer division does not
                # truncate the scale factor to zero
                energy = sum(data[metrics.index('power')]) * (
                    self.sampling_rate / 1000000.0)
                context.result.add_metric('{}_energy'.format(port),
                                          round(energy, 3), UNITS['energy'])

    def teardown(self, context):
        self.logger.debug('Terminating session.')
        self._execute_command('close')

    def finalize(self, context):
        if self.gpio_path:
            unexport_path = self.device.path.join(GPIO_ROOT, 'unexport')
            self.device.set_sysfile_value(unexport_path,
                                          self.gpio_sync,
                                          verify=False)

    def validate(self):
        if not daq:
            raise ImportError(import_error_mesg)
        self._results = None
        self._metrics = set()
        if self.labels:
            if len(self.labels) != len(self.resistor_values):
                raise ConfigError(
                    'Number of DAQ port labels does not match the number of resistor values.'
                )
        else:
            self.labels = [
                'PORT_{}'.format(i) for i, _ in enumerate(self.resistor_values)
            ]
        self.server_config = ServerConfiguration(host=self.server_host,
                                                 port=self.server_port)
        self.device_config = DeviceConfiguration(
            device_id=self.device_id,
            v_range=self.v_range,
            dv_range=self.dv_range,
            sampling_rate=self.sampling_rate,
            resistor_values=self.resistor_values,
            channel_map=self.channel_map,
            labels=self.labels)
        try:
            self.server_config.validate()
            self.device_config.validate()
        except ConfigurationError as ex:
            raise ConfigError('DAQ configuration: ' +
                              ex.message)  # Re-raise as a WA error
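
The energy metric at the end of update_result() above is a discrete integral of the power trace: each sample contributes its power value multiplied by the sample interval. A minimal sketch of the general form (the units and exact scale factor are assumptions; in the instrument they depend on the units the DAQ server writes into the CSV files):

# Energy as the discrete integral of a sampled power trace:
# E = sum(P_i) * dt, with dt = 1 / sampling_rate.
sampling_rate = 10000                      # samples per second
power_samples = [1.20, 1.30, 1.25, 1.10]   # watts (assumed units)

dt = 1.0 / sampling_rate                   # float literal: plain 1 / 10000
                                           # truncates to 0 under Python 2
energy = sum(power_samples) * dt           # joules
print(round(energy, 6))                    # 0.000485
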
Example No. 30
class Ebizzy(Workload):

    name = 'ebizzy'
    description = """
    ebizzy is designed to generate a workload resembling common web
    application server workloads.  It is highly threaded, has a large in-memory
    working set with low locality, and allocates and deallocates memory frequently.
    When running most efficiently, it will max out the CPU.

    ebizzy description taken from the source code at
    https://github.com/linux-test-project/ltp/tree/master/utils/benchmark/ebizzy-0.3

    """

    parameters = [
        Parameter('threads',
                  kind=int,
                  default=2,
                  description='Number of threads to execute.'),
        Parameter('seconds',
                  kind=int,
                  default=10,
                  description='Number of seconds to run for.'),
        Parameter('chunks',
                  kind=int,
                  default=10,
                  description='Number of memory chunks to allocate.'),
        Parameter(
            'extra_params',
            kind=str,
            default='',
            description='Extra parameters to pass in (e.g. -M to disable mmap).'
            ' See ebizzy -? for full list of options.')
    ]

    def setup(self, context):
        timeout_buf = 10
        self.command = '{} -t {} -S {} -n {} {} > {}'
        self.ebizzy_results = os.path.join(self.device.working_directory,
                                           results_txt)
        self.device_binary = None
        self.run_timeout = self.seconds + timeout_buf

        self.binary_name = 'ebizzy'
        if not self.device.is_installed(self.binary_name):
            host_binary = context.resolver.get(
                Executable(self, self.device.abi, self.binary_name))
            self.device_binary = self.device.install(host_binary)
        else:
            self.device_binary = self.binary_name

        self.command = self.command.format(self.device_binary, self.threads,
                                           self.seconds, self.chunks,
                                           self.extra_params,
                                           self.ebizzy_results)

    def run(self, context):
        self.device.execute(self.command, timeout=self.run_timeout)

    def update_result(self, context):
        self.device.pull_file(self.ebizzy_results, context.output_directory)

        with open(os.path.join(context.output_directory,
                               results_txt)) as ebizzy_file:
            for line in ebizzy_file:
                record_match = record_regex.search(line)
                if record_match:
                    context.result.add_metric('total_recs',
                                              record_match.group('record'),
                                              'records/s')

                results_match = result_regex.search(line)
                if results_match:
                    context.result.add_metric(results_match.group('metric'),
                                              results_match.group('value'),
                                              results_match.group('unit'))

    def teardown(self, context):
        self.device.uninstall_executable(self.device_binary)

    def validate(self):
        pass
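
record_regex and result_regex (and results_txt) are module-level constants in WA's ebizzy workload and are not part of this snippet. A hypothetical sketch of what such patterns could look like, based on the "N records/s" summary line that ebizzy prints; treat these as assumptions, not WA's actual definitions:

import re

# Assumed patterns for illustration; the real record_regex/result_regex
# live at module level in the workload and are not shown here.
record_regex = re.compile(r'(?P<record>\d+) records/s')
result_regex = re.compile(r'(?P<metric>[a-z]+) (?P<value>[\d.]+) (?P<unit>\w+)')

line = '12345 records/s'
match = record_regex.search(line)
if match:
    print(match.group('record'))  # -> 12345

# A timing line like 'real 10.00 s' (format assumed) would be picked up
# by result_regex instead:
m = result_regex.search('real 10.00 s')
if m:
    print(m.group('metric'), m.group('value'), m.group('unit'))
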