Example 1
class MonsoonBackend(EnergyInstrumentBackend):

    name = 'monsoon'
    description = """
    Monsoon Solutions power monitor

    To use this instrument, you need to install the monsoon.py script available
    from the Android Open Source Project. As of May 2017 this is under the CTS
    repository:

        https://android.googlesource.com/platform/cts/+/master/tools/utils/monsoon.py

    Collects power measurements only, from a selection of two channels: the USB
    passthrough channel and the main output channel.

    """

    parameters = [
        Parameter('monsoon_bin',
                  default=which('monsoon.py'),
                  description="""
                  Path to monsoon.py executable. If not provided,
                  ``PATH`` is searched.
                  """),
        Parameter('tty_device',
                  default='/dev/ttyACM0',
                  description="""
                  TTY device to use to communicate with the Power
                  Monitor. If not provided, /dev/ttyACM0 is used.
                  """)
    ]

    instrument = MonsoonInstrument
Example 2
class YoutubePlayback(ApkWorkload):
    """
    Simple Youtube video playback

    This triggers a video streaming playback on Youtube. Unlike the more
    featureful "youtube" workload, this performs no other action than starting
    the video via an intent and then waiting for a certain amount of playback
    time. This is therefore only useful when you are confident that the content
    at the end of the provided URL is stable - that means the video should have
    no advertisements attached.
    """
    name = 'youtube_playback'

    package_names = ['com.google.android.youtube']
    action = 'android.intent.action.VIEW'

    parameters = [
        Parameter('video_url', default='https://www.youtube.com/watch?v=YE7VzlLtp-4',
                  description='URL of video to play'),
        Parameter('duration', kind=int, default=20,
                  description='Number of seconds of video to play'),
    ]

    def setup(self, context):
        super(YoutubePlayback, self).setup(context)

        self.command = 'am start -a {} {}'.format(self.action, self.video_url)

    def run(self, context):
        self.target.execute(self.command)

        time.sleep(self.duration)
Example 3
class TargzProcessor(OutputProcessor):

    name = 'targz'

    description = '''
    Create a tarball of the output directory.

    This will create a gzip-compressed tarball of the output directory. By
    default, it will be created at the same level and will have the same name
    as the output directory, but with a .tar.gz extension.
    '''

    parameters = [
        Parameter('outfile',
                  description='''
                  The name of the output file to be used. If this is not an
                  absolute path, the file will be created relative to the
                  directory in which WA was invoked. If this contains
                  subdirectories, they must already exist.

                  The name may contain named format specifiers. Any of the
                  ``RunInfo`` fields can be named, resulting in the value of
                  that field (e.g. ``'start_time'``) being formatted into the
                  tarball name.

                  By default, the output file will be created at the same
                  level as, and with the same name as, the WA output directory
                  (but with a .tar.gz extension).
                  '''),
        Parameter('delete-output',
                  kind=bool,
                  default=False,
                  description='''
                  If set to ``True``, the WA output directory will be deleted after
                  the tarball is created.
                  '''),
    ]

    def initialize(self, context):
        if self.delete_output:
            self.logger.debug('Registering RUN_FINALIZED handler.')
            signal.connect(self.delete_output_directory,
                           signal.RUN_FINALIZED,
                           priority=-100)

    def export_run_output(self, run_output, target_info):  # pylint: disable=unused-argument
        if self.outfile:
            outfile_path = self.outfile.format(**run_output.info.to_pod())
        else:
            outfile_path = run_output.basepath.rstrip('/') + '.tar.gz'

        self.logger.debug('Creating {}'.format(outfile_path))
        with tarfile.open(outfile_path, 'w:gz') as tar:
            tar.add(run_output.basepath)

    def delete_output_directory(self, context):
        self.logger.debug('Deleting output directory')
        shutil.rmtree(context.run_output.basepath)
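
The ``outfile`` templating described above reduces to a plain ``str.format`` call over the ``RunInfo`` pod, as seen in ``export_run_output``. A minimal sketch of the expansion, assuming an illustrative pod dict (the field values here are made up, not taken from a real run):

# Illustrative sketch of the outfile name expansion performed by export_run_output.
info_pod = {
    'run_name': 'nightly',
    'start_time': '2017-05-12_10-30-00',
}

outfile = 'wa-{run_name}-{start_time}.tar.gz'
print(outfile.format(**info_pod))  # -> wa-nightly-2017-05-12_10-30-00.tar.gz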
Example 4
class EnergyProbeBackend(EnergyInstrumentBackend):

    name = 'energy_probe'
    description = """
    Arm Energy Probe caiman version

    This backend relies on caiman utility:

        https://github.com/ARM-software/caiman

    For more information about Arm Energy Probe please see

        https://developer.arm.com/products/software-development-tools/ds-5-development-studio/streamline/arm-energy-probe

    """

    parameters = [
        Parameter('resistor_values',
                  kind=list_of_ints,
                  description="""
                  The values of the resistors (in Ohms) across which the
                  voltages are measured.
                  """),
        Parameter('labels',
                  kind=list_of_strings,
                  description="""
                  List of port labels. If specified, the length of the list
                  must match the length of ``resistor_values``.
                  """),
        Parameter('device_entry',
                  kind=str,
                  default='/dev/ttyACM0',
                  description="""
                  Path to /dev entry for the energy probe (it should be /dev/ttyACMx)
                  """),
        Parameter('keep_raw',
                  kind=bool,
                  default=False,
                  description="""
                  If set to ``True``, this will prevent the raw files obtained
                  from the device before processing from being deleted
                  (this is mainly used for debugging).
                  """),
    ]

    instrument = EnergyProbeInstrument

    def validate_parameters(self, params):
        if not params.get('resistor_values'):
            raise ConfigError(
                'Mandatory parameter "resistor_values" is not set.')
        if params.get('labels'):
            if len(params.get('labels')) != len(params.get('resistor_values')):
                msg = 'Number of Energy Probe port labels does not match the number of resistor values.'
                raise ConfigError(msg)
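
As a usage note, the validation above only demands that ``resistor_values`` is present and that ``labels``, when given, pairs up with it one-to-one. A minimal sketch of parameter values that would pass the check (the resistor values and labels are illustrative):

# Illustrative energy_probe parameters; mirrors the checks in validate_parameters().
params = {
    'resistor_values': [20, 20, 20],      # one value (in Ohms) per measured channel
    'labels': ['A57', 'A53', 'GPU'],      # optional, but must match resistor_values in length
    'device_entry': '/dev/ttyACM0',
}

assert params.get('resistor_values'), 'resistor_values is mandatory'
assert len(params['labels']) == len(params['resistor_values'])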
Example 5
class ArmEnergyProbeBackend(EnergyInstrumentBackend):

    name = 'arm_energy_probe'
    description = """
    Arm Energy Probe arm-probe version

    An alternative Arm Energy Probe backend that relies on arm-probe utility:

        https://git.linaro.org/tools/arm-probe.git

    For more information about Arm Energy Probe please see

        https://developer.arm.com/products/software-development-tools/ds-5-development-studio/streamline/arm-energy-probe


    """

    parameters = [
        Parameter('config_file',
                  kind=str,
                  description="""
                  Path to the config file of the AEP.
                  """),
        Parameter('keep_raw',
                  kind=bool,
                  default=False,
                  description="""
                  If set to ``True``, this will prevent the raw files obtained
                  from the device before processing from being deleted
                  (this is mainly used for debugging).
                  """),
    ]

    instrument = ArmEnergyProbeInstrument

    def get_instruments(self, target, metadir, **kwargs):
        """
        Get a dict mapping device keys to Instruments.

        Typically there is just a single device/instrument, in which case the
        device key is arbitrary.
        """

        shutil.copy(self.config_file, metadir)

        return {None: self.instrument(target, **kwargs)}

    def validate_parameters(self, params):
        if not params.get('config_file'):
            raise ConfigError('Mandatory parameter "config_file" is not set.')
        self.config_file = params.get('config_file')
        if not os.path.exists(self.config_file):
            raise ConfigError('"config_file" does not exist.')
Example 6
class ShellScript(Workload):

    name = 'shellscript'
    description = """
    Runs an arbitrary shell script on the target.

    """

    parameters = [
        Parameter('script_file', mandatory=True,
                  description='''
                  The path (on the host) to the shell script file. This must be
                  an absolute path (though it may contain ~).
                  '''),
        Parameter('argstring', default='',
                  description='A string that should contain arguments passed to the script.'),
        Parameter('as_root', kind=bool, default=False,
                  description='Specify whether the script should be run as root.'),
        Parameter('timeout', kind=int, default=60,
                  description='Timeout, in seconds, for the script run time.'),
    ]

    def initialize(self, context):
        if self.as_root and not self.target.is_rooted:
            raise WorkloadError('Cannot run script as root -- target appears to be unrooted.')

        self.script_file = os.path.expanduser(self.script_file)
        if not os.path.isfile(self.script_file):
            raise ConfigError('Can\'t access file (is the path correct?): {}'.format(self.script_file))
        self.output = None
        self.command = None
        self.on_target_script_file = None

    def setup(self, context):
        self.on_target_script_file = self.target.get_workpath(os.path.basename(self.script_file))
        self.target.push(self.script_file, self.on_target_script_file)
        self.command = 'sh {} {}'.format(self.on_target_script_file, self.argstring)

    def run(self, context):
        self.output = self.target.execute(self.command, timeout=self.timeout, as_root=self.as_root)

    def extract_results(self, context):
        with open(os.path.join(context.output_directory, 'output.txt'), 'w') as wfh:
            wfh.write(self.output)

    def teardown(self, context):
        if self.cleanup_assets:
            self.target.remove(self.on_target_script_file)
Example 7
class Gfxbench(ApkUiautoWorkload):

    name = 'gfxbench-corporate'
    package_names = ['net.kishonti.gfxbench.gl.v50000.corporate']
    clear_data_on_reset = False
    regex_matches = [
        re.compile(r'Car Chase score (.+)'),
        re.compile(r'Car Chase Offscreen score (.+)'),
        re.compile(r'Manhattan 3.1 score (.+)'),
        re.compile(r'1080p Manhattan 3.1 Offscreen score (.+)'),
        re.compile(r'1440p Manhattan 3.1 Offscreen score (.+)'),
        re.compile(r'Tessellation score (.+)'),
        re.compile(r'Tessellation Offscreen score (.+)')
    ]
    score_regex = re.compile(r'.*?([\d.]+).*')
    description = '''
    Execute a subset of graphical performance benchmarks

    Test description:
    1. Open the gfxbench application
    2. Execute Car Chase, Manhattan and Tessellation benchmarks

    '''
    parameters = [
        Parameter('timeout',
                  kind=int,
                  default=3600,
                  description=('Timeout for an iteration of the benchmark.')),
    ]

    def __init__(self, target, **kwargs):
        super(Gfxbench, self).__init__(target, **kwargs)
        self.gui.timeout = self.timeout

    def update_output(self, context):
        super(Gfxbench, self).update_output(context)
        expected_results = len(self.regex_matches)
        logcat_file = context.get_artifact_path('logcat')
        with open(logcat_file) as fh:
            for line in fh:
                for regex in self.regex_matches:
                    match = regex.search(line)
                    # Check if we have matched the score string in logcat
                    if match:
                        score_match = self.score_regex.search(match.group(1))
                        # Check if there is valid number found for the score.
                        if score_match:
                            result = float(score_match.group(1))
                        else:
                            result = 'NaN'
                        entry = regex.pattern.rsplit(None, 1)[0]
                        context.add_metric(entry,
                                           result,
                                           'FPS',
                                           lower_is_better=False)
                        expected_results -= 1
        if expected_results > 0:
            msg = "The GFXBench workload has failed. Expected {} scores, Detected {} scores."
            raise WorkloadError(
                msg.format(len(self.regex_matches), expected_results))
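
The score extraction above is two-stage: one of the ``regex_matches`` patterns finds the benchmark line in logcat, then ``score_regex`` pulls the first number out of the captured tail, and the metric name is the pattern minus its trailing capture group. A minimal standalone sketch on a made-up logcat-style line (the line content is illustrative, not real GFXBench output):

import re

regex_match = re.compile(r'Manhattan 3.1 score (.+)')
score_regex = re.compile(r'.*?([\d.]+).*')

line = 'I/System.out: Manhattan 3.1 score 3214.5 (60.2 fps)'  # illustrative
match = regex_match.search(line)
if match:
    score_match = score_regex.search(match.group(1))
    result = float(score_match.group(1)) if score_match else 'NaN'
    entry = regex_match.pattern.rsplit(None, 1)[0]  # drop the trailing '(.+)'
    print(entry, result)  # -> Manhattan 3.1 score 3214.5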
Example 8
class TheChase(ApkWorkload):

    name = 'thechase'
    description = """
    The Chase demo showcasing the capabilities of Unity game engine.

    This demo is a static, video-like game demo that demonstrates advanced features
    of the Unity game engine. It loops continuously until terminated.

    """

    package_names = ['com.unity3d.TheChase']
    install_timeout = 200
    view = 'SurfaceView - com.unity3d.TheChase/com.unity3d.player.UnityPlayerNativeActivity'

    parameters = [
        Parameter(
            'duration',
            kind=int,
            default=70,
            description=('Duration, in seconds. Note that the demo loops the same '
                         '(roughly) 60-second scene until stopped.')),
    ]

    def run(self, context):
        self.target.sleep(self.duration)
Example 9
class HomeScreen(Workload):

    name = 'homescreen'
    description = """
    A workload that goes to the home screen and idles for the
    specified duration.

    """
    supported_platforms = ['android']

    parameters = [
        Parameter(
            'duration',
            kind=int,
            default=20,
            description='Specifies the duration, in seconds, of this workload.'
        ),
    ]

    def setup(self, context):
        self.target.clear_logcat()
        self.target.execute('input keyevent 3')  # press the home key

    def run(self, context):
        self.target.sleep(self.duration)
Example 10
class DynamicFrequencyInstrument(SysfsExtractor):

    name = 'cpufreq'
    description = """
    Collects dynamic frequency (DVFS) settings before and after workload execution.

    """

    tarname = 'cpufreq.tar.gz'

    parameters = [
        Parameter('paths', mandatory=False, override=True),
    ]

    def setup(self, context):
        self.paths = ['/sys/devices/system/cpu']
        if self.use_tmpfs:
            # the '*' would cause problems for adb pull
            self.paths.append('/sys/class/devfreq/*')
        super(DynamicFrequencyInstrument, self).setup(context)

    def validate(self):
        super(DynamicFrequencyInstrument, self).validate()
        if not self.tmpfs_mount_point.endswith('-cpufreq'):  # pylint: disable=access-member-before-definition
            self.tmpfs_mount_point += '-cpufreq'
Example 11
class Speedometer(UiautoWorkload):

    name = 'speedometer'
    regex = re.compile(r'Speedometer Score ([\d.]+)')
    versions = ['1.0', '2.0']
    description = '''
    A workload to execute the Speedometer web-based benchmark

    Test description:
    1. Open browser application
    2. Navigate to the speedometer website - http://browserbench.org/Speedometer/
    3. Execute the benchmark

    '''

    parameters = [
        Parameter('version',
                  allowed_values=versions,
                  kind=str,
                  default='2.0',
                  description='''
                  The speedometer version to be used.
                  ''')
    ]

    def __init__(self, target, **kwargs):
        super(Speedometer, self).__init__(target, **kwargs)
        self.gui.timeout = 1500
        self.gui.uiauto_params['version'] = self.version

    def setup(self, context):
        super(Speedometer, self).setup(context)
        url = 'am start -a android.intent.action.VIEW -d http://browserbench.org/Speedometer' + self.version
        if self.version == '1.0':
            url = 'am start -a android.intent.action.VIEW -d http://browserbench.org/Speedometer'
        self.target.execute(url)

    def update_output(self, context):
        super(Speedometer, self).update_output(context)
        result = None
        logcat_file = context.get_artifact_path('logcat')
        with open(logcat_file) as fh:
            for line in fh:
                match = self.regex.search(line)
                if match:
                    result = float(match.group(1))

        if result is not None:
            context.add_metric('Speedometer Score',
                               result,
                               'Runs per minute',
                               lower_is_better=False)
        else:
            raise WorkloadError(
                "The Speedometer workload has failed. No score was obtainable."
            )
Example 12
class GeekbenchCorproate(Geekbench):  # pylint: disable=too-many-ancestors
    name = "geekbench-corporate"
    is_corporate = True
    requires_network = False
    supported_versions = ['4.1.0', '5.0.0']
    package_names = ['com.primatelabs.geekbench4.corporate']
    activity = 'com.primatelabs.geekbench.HomeActivity'

    parameters = [
        Parameter('version', allowed_values=supported_versions, override=True)
    ]
Example 13
class Speedometer(ApkUiautoWorkload):

    name = 'speedometer'
    package_names = ['com.android.chrome']
    regex = re.compile(r'Speedometer Score ([\d.]+)')
    versions = ['1.0', '2.0']
    description = '''
    A workload to execute the Speedometer web-based benchmark

    Test description:
    1. Open chrome
    2. Navigate to the speedometer website - http://browserbench.org/Speedometer/
    3. Execute the benchmark

    Known working Chrome version: 80.0.3987.149
    '''

    parameters = [
        Parameter('speedometer_version',
                  allowed_values=versions,
                  kind=str,
                  default='2.0',
                  description='''
                  The speedometer version to be used.
                  ''')
    ]

    requires_network = True

    def __init__(self, target, **kwargs):
        super(Speedometer, self).__init__(target, **kwargs)
        self.gui.timeout = 1500
        self.gui.uiauto_params['version'] = self.speedometer_version

    def update_output(self, context):
        super(Speedometer, self).update_output(context)
        result = None
        logcat_file = context.get_artifact_path('logcat')
        with open(logcat_file) as fh:
            for line in fh:
                match = self.regex.search(line)
                if match:
                    result = float(match.group(1))

        if result is not None:
            context.add_metric('Speedometer Score',
                               result,
                               'Runs per minute',
                               lower_is_better=False)
        else:
            raise WorkloadError(
                "The Speedometer workload has failed. No score was obtainable."
            )
Example 14
class DmesgInstrument(Instrument):
    # pylint: disable=no-member,attribute-defined-outside-init
    """
    Collects dmesg output before and during the run.

    """

    name = 'dmesg'

    parameters = [
        Parameter('loglevel',
                  kind=int,
                  allowed_values=list(range(8)),
                  description='Set loglevel for console output.')
    ]

    loglevel_file = '/proc/sys/kernel/printk'

    def initialize(self, context):  # pylint: disable=unused-argument
        self.need_root = self.target.os == 'android'
        if self.need_root and not self.target.is_rooted:
            raise InstrumentError('Need root to collect dmesg on Android')

    def setup(self, context):
        if self.loglevel:
            self.old_loglevel = self.target.read_int(self.loglevel_file)
            self.target.write_value(self.loglevel_file,
                                    self.loglevel,
                                    verify=False)
        self.before_file = _f(
            os.path.join(context.output_directory, 'dmesg', 'before'))
        self.after_file = _f(
            os.path.join(context.output_directory, 'dmesg', 'after'))

    @slow
    def start(self, context):
        with open(self.before_file, 'w') as wfh:
            wfh.write(self.target.execute('dmesg', as_root=self.need_root))
        context.add_artifact('dmesg_before', self.before_file, kind='data')
        if self.target.is_rooted:
            self.target.execute('dmesg -c', as_root=True)

    @slow
    def stop(self, context):
        with open(self.after_file, 'w') as wfh:
            wfh.write(self.target.execute('dmesg', as_root=self.need_root))
        context.add_artifact('dmesg_after', self.after_file, kind='data')

    def teardown(self, context):  # pylint: disable=unused-argument
        if self.loglevel:
            self.target.write_value(self.loglevel_file,
                                    self.old_loglevel,
                                    verify=False)
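
Outside of WA, the same before/after capture can be reproduced with a couple of shell commands, which is essentially what the instrument drives through ``target.execute``. A minimal host-side sketch, assuming a local Linux machine with sufficient privileges to read and clear the kernel log (this is an illustration, not part of the instrument):

import subprocess

LOGLEVEL_FILE = '/proc/sys/kernel/printk'

# Read the current console loglevel (first field of the printk file).
with open(LOGLEVEL_FILE) as fh:
    old_loglevel = int(fh.read().split()[0])

# Capture the "before" buffer, then clear it so the next capture only holds new messages.
before = subprocess.check_output(['dmesg'], text=True)
subprocess.run(['dmesg', '-c'], check=True, stdout=subprocess.DEVNULL)

# ... run the workload here ...

after = subprocess.check_output(['dmesg'], text=True)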
Example 15
class Uibench(ApkWorkload):

    name = 'uibench'
    description = """
        Runs a particular activity of the UIBench_ workload suite. The suite
        is provided by Google as a testbench for the Android UI.

        .. _UIBench: https://android.googlesource.com/platform/frameworks/base/+/refs/heads/master/tests/UiBench/
    """
    package_names = ['com.android.test.uibench']
    loading_time = 1

    parameters = [
        Parameter('activity',
                  kind=str,
                  description="""
                  The UIBench activity to be run. Each activity corresponds to
                  a test. If this parameter is omitted, the application is
                  launched in its main menu. Please note that the available
                  activities vary between versions of UIBench (which follow
                  AOSP versioning) and the availability of the services under
                  test may depend on the version of the target Android. We
                  recommend using the APK of UIBench corresponding to the
                  Android version, enforced through the ``version`` parameter to
                  this workload.
                  """),
        Parameter('duration',
                  kind=int,
                  default=10,
                  description="""
                  As activities do not finish, this workload will terminate
                  UIBench after the given duration.
                  """),
    ]

    def run(self, context):
        super(Uibench, self).run(context)
        self.target.sleep(self.duration)
Example 16
class ScreenCaptureInstrument(Instrument):

    name = 'screen_capture'
    description = """
    A simple instrument which captures the screen on the target device at a user-specified period.

    Please note that if too short a period is specified, this
    instrument will capture the screen as fast as possible, rather
    than at the specified periodicity.
    """

    parameters = [
        Parameter('period',
                  kind=int,
                  default=10,
                  description="""
                  Period (in seconds) at which to capture the screen on the target.
                  """),
    ]

    def __init__(self, target, **kwargs):
        super(ScreenCaptureInstrument, self).__init__(target, **kwargs)
        self.collector = None

    def setup(self, context):
        # We need to create a directory for the captured screenshots
        output_path = os.path.join(context.output_directory, "screen-capture")
        os.mkdir(output_path)
        self.collector = ScreenCaptureCollector(self.target, self.period)
        self.collector.set_output(output_path)
        self.collector.reset()

    def start(self, context):  # pylint: disable=unused-argument
        self.collector.start()

    def stop(self, context):  # pylint: disable=unused-argument
        self.collector.stop()
Example 17
class Gmail(ApkUiautoWorkload):

    name = 'gmail'
    package_names = ['com.google.android.gm']
    description = '''
    A workload to perform standard productivity tasks within Gmail.  The workload carries out
    various tasks, such as creating new emails, attaching images and sending them.

    Test description:
    1. Open Gmail application
    2. Click to create New mail
    3. Attach an image from the local images folder to the email
    4. Enter recipient details in the To field
    5. Enter text in the Subject field
    6. Enter text in the Compose field
    7. Click the Send mail button

    To run the workload in offline mode, a 'mailstore.tar' file is required. In order to
    generate such a file, Gmail should first be operated from an Internet-connected environment.
    After this, the relevant database files can be found in the
    '/data/data/com.google.android.gm/databases' directory. These files can then be archived to
    produce a tarball using a command such as ``tar -cvf mailstore.tar -C /path/to/databases .``.
    The result should then be placed in the '~/.workload_automation/dependencies/gmail/' directory
    on your local machine, creating this if it does not already exist.

    Known working APK version: 2019.05.26.252424914.release
    '''

    parameters = [
        Parameter('recipient',
                  kind=str,
                  default='*****@*****.**',
                  description='''
                  The email address of the recipient. Setting an invalid address
                  will stop any message failures from clogging up your device inbox.
                  '''),
        Parameter('test_image',
                  kind=str,
                  default='uxperf_1600x1200.jpg',
                  description='''
                  An image to be copied onto the device that will be attached
                  to the email
                  '''),
        Parameter('offline_mode',
                  kind=bool,
                  default=False,
                  description='''
                  If set to ``True``, the workload will execute in offline mode.
                  This mode requires root and makes use of a tarball of email
                  database files 'mailstore.tar' for the email account to be used.
                  This file is extracted directly to the application's 'databases'
                  directory at '/data/data/com.google.android.gm/databases'.
                  '''),
    ]

    @property
    def requires_network(self):
        return not self.offline_mode

    @property
    def requires_rerun(self):
        # In offline mode we need to restart the application after modifying its data directory
        return self.offline_mode

    def __init__(self, target, **kwargs):
        super(Gmail, self).__init__(target, **kwargs)
        self.deployable_assets = [self.test_image]
        if self.offline_mode:
            self.deployable_assets.append('mailstore.tar')
        self.cleanup_assets = True

    def initialize(self, context):
        super(Gmail, self).initialize(context)
        if self.offline_mode and not self.target.is_rooted:
            raise WorkloadError(
                'This workload requires root to set up Gmail for offline usage.'
            )

    def init_resources(self, context):
        super(Gmail, self).init_resources(context)
        # Get the working directory name regardless of whether the path ends with a '/'
        work_dir = self.target.working_directory
        work_dir = work_dir if work_dir[-1] != os.sep else work_dir[:-1]
        self.gui.uiauto_params['workdir_name'] = self.target.path.basename(
            work_dir)
        self.gui.uiauto_params['recipient'] = self.recipient
        self.gui.uiauto_params['offline_mode'] = self.offline_mode
        self.gui.uiauto_params['test_image'] = self.test_image
        # Only accept certain image formats
        if os.path.splitext(
                self.test_image.lower())[1] not in ['.jpg', '.jpeg', '.png']:
            raise ValidationError('{} must be a JPEG or PNG file'.format(
                self.test_image))

    def setup_rerun(self):
        super(Gmail, self).setup_rerun()
        database_src = self.target.path.join(self.target.working_directory,
                                             'mailstore.tar')
        database_dst = self.target.path.join(
            self.target.package_data_directory, self.package, 'databases')
        existing_mailstores = self.target.path.join(database_dst,
                                                    'mailstore.*')
        owner = self.target.execute("{} stat -c '%u' {}".format(
            self.target.busybox, database_dst),
                                    as_root=True).strip()
        self.target.execute('{} rm {}'.format(self.target.busybox,
                                              existing_mailstores),
                            as_root=True)
        self.target.execute('{} tar -xvf {} -C {}'.format(
            self.target.busybox, database_src, database_dst),
                            as_root=True)
        self.target.execute('{0} chown -R {1}:{1} {2}'.format(
            self.target.busybox, owner, database_dst),
                            as_root=True)
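
The 'mailstore.tar' described in the docstring can also be produced on the host with Python's ``tarfile`` module instead of the ``tar`` command. A minimal sketch, assuming the Gmail database files have already been copied to a local directory (the source path is the placeholder from the docstring):

import os
import tarfile

databases_dir = '/path/to/databases'   # local copy of /data/data/com.google.android.gm/databases
output_tar = os.path.expanduser('~/.workload_automation/dependencies/gmail/mailstore.tar')

os.makedirs(os.path.dirname(output_tar), exist_ok=True)
with tarfile.open(output_tar, 'w') as tar:
    # Equivalent of `tar -cvf mailstore.tar -C /path/to/databases .`
    tar.add(databases_dir, arcname='.')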
Example 18
class Jankbench(ApkWorkload):

    name = 'jankbench'
    description = """
    Internal Google benchmark for evaluating jank on Android.

    """
    package_names = ['com.android.benchmark']
    activity = '.app.RunLocalBenchmarksActivity'

    results_db_file = 'BenchmarkResults'

    iteration_regex = re.compile(
        r'System.out: iteration: (?P<iteration>[0-9]+)')
    metrics_regex = re.compile(
        r'System.out: Mean: (?P<mean>[0-9\.]+)\s+JankP: (?P<junk_p>[0-9\.]+)\s+'
        r'StdDev: (?P<std_dev>[0-9\.]+)\s+Count Bad: (?P<count_bad>[0-9]+)\s+'
        r'Count Jank: (?P<count_junk>[0-9]+)')

    valid_test_ids = [
        # Order matters -- the index of the id must match what is expected by
        # the App.
        'list_view',
        'image_list_view',
        'shadow_grid',
        'low_hitrate_text',
        'high_hitrate_text',
        'edit_text',
        'overdraw_test',
    ]

    parameters = [
        Parameter('test_ids',
                  kind=list_or_string,
                  allowed_values=valid_test_ids,
                  description='ID of the jankbench test to be run.'),
        Parameter('loops',
                  kind=int,
                  default=1,
                  constraint=lambda x: x > 0,
                  aliases=['reps'],
                  description='''
                  Specifies the number of times the benchmark will be run in a "tight loop",
                  i.e. without performing setup/teardown in between.
                  '''),
        Parameter('pull_results_db',
                  kind=bool,
                  description='''
                  Specifies whether an SQLite database with detailed results should be pulled
                  from benchmark app's data. This requires the device to be rooted.

                  This defaults to ``True`` for rooted devices and ``False`` otherwise.
                  '''),
        Parameter('timeout',
                  kind=int,
                  default=10 * 60,
                  aliases=['run_timeout'],
                  description="""
                  Timeout for workload execution. The workload will be killed if it hasn't completed
                  within this period.
                  """),
    ]

    def setup(self, context):
        super(Jankbench, self).setup(context)

        if self.pull_results_db is None:
            self.pull_results_db = self.target.is_rooted
        elif self.pull_results_db and not self.target.is_rooted:
            raise ConfigError('pull_results_db set for an unrooted device')

        if not self.target.is_container:
            self.target.ensure_screen_is_on()

        self.command = self._build_command()
        self.monitor = JankbenchRunMonitor(self.target)
        self.monitor.start()

    def run(self, context):
        result = self.target.execute(self.command, timeout=self.timeout)
        if 'FAILURE' in result:
            raise WorkloadError(result)
        else:
            self.logger.debug(result)
        self.target.sleep(DELAY)
        self.monitor.wait_for_run_end(self.timeout)

    def extract_results(self, context):
        self.monitor.stop()
        if self.pull_results_db:
            target_file = self.target.path.join(
                self.target.package_data_directory, self.package, 'databases',
                self.results_db_file)
            host_file = os.path.join(context.output_directory,
                                     self.results_db_file)
            self.target.pull(target_file, host_file, as_root=True)
            context.add_artifact('jankbench-results', host_file, 'data')

    def update_output(self, context):  # NOQA
        super(Jankbench, self).update_output(context)
        if self.pull_results_db:
            self.extract_metrics_from_db(context)
        else:
            self.extract_metrics_from_logcat(context)

    def extract_metrics_from_db(self, context):  # pylint: disable=no-self-use
        dbfile = context.get_artifact_path('jankbench-results')
        with sqlite3.connect(dbfile) as conn:
            df = pd.read_sql(
                'select name, iteration, total_duration, jank_frame from ui_results',
                conn)
            g = df.groupby(['name', 'iteration'])
            janks = g.jank_frame.sum()
            janks_pc = janks / g.jank_frame.count() * 100
            results = pd.concat([
                g.total_duration.mean(),
                g.total_duration.std(),
                janks,
                janks_pc,
            ],
                                axis=1)
            results.columns = ['mean', 'std_dev', 'count_jank', 'jank_p']

            for test_name, rep in results.index:
                test_results = results.loc[test_name, rep]
                for metric, value in test_results.items():
                    context.add_metric(metric,
                                       value,
                                       units=None,
                                       lower_is_better=True,
                                       classifiers={
                                           'test_name': test_name,
                                           'rep': rep
                                       })

    def extract_metrics_from_logcat(self, context):
        metric_names = ['mean', 'junk_p', 'std_dev', 'count_bad', 'count_junk']
        logcat_file = context.get_artifact_path('logcat')
        with open(logcat_file, errors='replace') as fh:
            run_tests = copy(self.test_ids or self.valid_test_ids)
            current_iter = None
            current_test = None
            for line in fh:

                match = self.iteration_regex.search(line)
                if match:
                    if current_iter is not None:
                        msg = 'Did not see results for iteration {} of {}'
                        self.logger.warning(
                            msg.format(current_iter, current_test))
                    current_iter = int(match.group('iteration'))
                    if current_iter == 0:
                        try:
                            current_test = run_tests.pop(0)
                        except IndexError:
                            self.logger.warning(
                                'Encountered an iteration for an unknown test.'
                            )
                            current_test = 'unknown'
                    continue

                match = self.metrics_regex.search(line)
                if match:
                    if current_iter is None:
                        self.logger.warning(
                            'Encountered unexpected metrics (no iteration)')
                        continue

                    for name in metric_names:
                        value = numeric(match.group(name))
                        context.add_metric(name,
                                           value,
                                           units=None,
                                           lower_is_better=True,
                                           classifiers={
                                               'test_id': current_test,
                                               'rep': current_iter
                                           })

                    current_iter = None

    def _build_command(self):
        command_params = []
        if self.test_ids:
            test_idxs = [
                str(self.valid_test_ids.index(i)) for i in self.test_ids
            ]
            command_params.append(
                '--eia com.android.benchmark.EXTRA_ENABLED_BENCHMARK_IDS {}'.
                format(','.join(test_idxs)))
        if self.loops:
            command_params.append(
                '--ei com.android.benchmark.EXTRA_RUN_COUNT {}'.format(
                    self.loops))
        return 'am start -W -S -n {}/{} {}'.format(self.package, self.activity,
                                                   ' '.join(command_params))
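
For reference, ``_build_command`` above just assembles an ``am start`` invocation whose intent extras select the benchmark indices and the repeat count. A minimal sketch of the string it produces for an illustrative selection (package, activity and test ids taken from the class attributes above):

package = 'com.android.benchmark'
activity = '.app.RunLocalBenchmarksActivity'
valid_test_ids = ['list_view', 'image_list_view', 'shadow_grid', 'low_hitrate_text',
                  'high_hitrate_text', 'edit_text', 'overdraw_test']

test_ids = ['list_view', 'shadow_grid']   # illustrative selection
loops = 3

test_idxs = ','.join(str(valid_test_ids.index(i)) for i in test_ids)
command = ('am start -W -S -n {}/{} '
           '--eia com.android.benchmark.EXTRA_ENABLED_BENCHMARK_IDS {} '
           '--ei com.android.benchmark.EXTRA_RUN_COUNT {}').format(
               package, activity, test_idxs, loops)
print(command)
# (single line) am start -W -S -n com.android.benchmark/.app.RunLocalBenchmarksActivity
#   --eia com.android.benchmark.EXTRA_ENABLED_BENCHMARK_IDS 0,2
#   --ei com.android.benchmark.EXTRA_RUN_COUNT 3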
Example 19
class PostgresqlResultProcessor(OutputProcessor):

    name = 'postgres'
    description = """
    Stores results in a Postgresql database.

    The structure of this database can easily be understood by examining
    the postgres_schema.sql file (the schema used to generate it):
    {}
    """.format(os.path.join(POSTGRES_SCHEMA_DIR, 'postgres_schema.sql'))

    parameters = [
        Parameter('username',
                  default='postgres',
                  description="""
                  This is the username that will be used to connect to the
                  Postgresql database. Note that depending on whether the user
                  has privileges to modify the database (normally only possible
                  on localhost), the user may only be able to append entries.
                  """),
        Parameter('password',
                  default=None,
                  description="""
                  The password to be used to connect to the specified database
                  with the specified username.
                  """),
        Parameter('dbname',
                  default='wa',
                  description="""
                  Name of the database that will be created or added to. Note,
                  to override this, you can specify a value in your user
                  wa configuration file.
                  """),
        Parameter('host',
                  kind=str,
                  default='localhost',
                  description="""
                  The host where the Postgresql server is running. The default
                  is localhost (i.e. the machine that wa is running on).
                  This is useful for complex systems where multiple machines
                  may be executing workloads and uploading their results to
                  a remote, centralised database.
                  """),
        Parameter('port',
                  kind=str,
                  default='5432',
                  description="""
                  The port the Postgresql server is running on, on the host.
                  The default is Postgresql's default, so do not change this
                  unless you have modified the default port for Postgresql.
                  """),
    ]

    # Commands
    sql_command = {
        "create_run":
        "INSERT INTO Runs (oid, event_summary, basepath, status, timestamp, run_name, project, project_stage, retry_on_status, max_retries, bail_on_init_failure, allow_phone_home, run_uuid, start_time, metadata, state, _pod_version, _pod_serialization_version) "
        "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
        "update_run":
        "UPDATE Runs SET event_summary=%s, status=%s, timestamp=%s, end_time=%s, duration=%s, state=%s WHERE oid=%s;",
        "create_job":
        "INSERT INTO Jobs (oid, run_oid, status, retry, label, job_id, iterations, workload_name, metadata, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);",
        "create_target":
        "INSERT INTO Targets (oid, run_oid, target, cpus, os, os_version, hostid, hostname, abi, is_rooted, kernel_version, kernel_release, kernel_sha1, kernel_config, sched_features, page_size_kb, screen_resolution, prop, android_id, _pod_version, _pod_serialization_version) "
        "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
        "create_event":
        "INSERT INTO Events (oid, run_oid, job_oid, timestamp, message, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s",
        "create_artifact":
        "INSERT INTO Artifacts (oid, run_oid, job_oid, name, large_object_uuid, description, kind, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",
        "create_metric":
        "INSERT INTO Metrics (oid, run_oid, job_oid, name, value, units, lower_is_better, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s , %s, %s, %s)",
        "create_augmentation":
        "INSERT INTO Augmentations (oid, run_oid, name) VALUES (%s, %s, %s)",
        "create_classifier":
        "INSERT INTO Classifiers (oid, artifact_oid, metric_oid, job_oid, run_oid, key, value) VALUES (%s, %s, %s, %s, %s, %s, %s)",
        "create_parameter":
        "INSERT INTO Parameters (oid, run_oid, job_oid, augmentation_oid, resource_getter_oid, name, value, value_type, type) "
        "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",
        "create_resource_getter":
        "INSERT INTO Resource_Getters (oid, run_oid, name) VALUES (%s, %s, %s)",
        "create_job_aug":
        "INSERT INTO Jobs_Augs (oid, job_oid, augmentation_oid) VALUES (%s, %s, %s)",
        "create_large_object":
        "INSERT INTO LargeObjects (oid, lo_oid) VALUES (%s, %s)"
    }

    # Lists to track which run-related items have already been added
    metrics_already_added = []
    # Dicts needed so that jobs can look up ids
    artifacts_already_added = {}
    augmentations_already_added = {}

    # Status bits (flags)
    first_job_run = True

    def __init__(self, *args, **kwargs):
        super(PostgresqlResultProcessor, self).__init__(*args, **kwargs)
        self.conn = None
        self.cursor = None
        self.run_uuid = None
        self.target_uuid = None

    def initialize(self, context):

        if not psycopg2:
            raise ImportError(
                'The psycopg2 module is required for the ' +
                'Postgresql Output Processor: {}'.format(import_error_msg))
        # N.B. Typecasters are for postgres->python and adapters the opposite
        self.connect_to_database()
        self.cursor = self.conn.cursor()
        self.verify_schema_versions()

        # Register the adapters and typecasters for enum types
        self.cursor.execute("SELECT NULL::status_enum")
        status_oid = self.cursor.description[0][1]
        self.cursor.execute("SELECT NULL::param_enum")
        param_oid = self.cursor.description[0][1]
        LEVEL = psycopg2.extensions.new_type((status_oid, ), "LEVEL",
                                             cast_level)
        psycopg2.extensions.register_type(LEVEL)
        PARAM = psycopg2.extensions.new_type((param_oid, ), "PARAM",
                                             cast_vanilla)
        psycopg2.extensions.register_type(PARAM)
        psycopg2.extensions.register_adapter(level, return_as_is(adapt_level))
        psycopg2.extensions.register_adapter(ListOfLevel,
                                             adapt_ListOfX(adapt_level))
        psycopg2.extensions.register_adapter(KernelVersion, adapt_vanilla)
        psycopg2.extensions.register_adapter(CpuInfo, adapt_vanilla)
        psycopg2.extensions.register_adapter(collections.OrderedDict,
                                             extras.Json)
        psycopg2.extensions.register_adapter(dict, extras.Json)
        psycopg2.extensions.register_adapter(
            KernelConfig, create_iterable_adapter(2, explicit_iterate=True))
        # Register ready-made UUID type adapter
        extras.register_uuid()

        # Insert a run_uuid which will be globally accessible during the run
        self.run_uuid = uuid.UUID(str(uuid.uuid4()))
        run_output = context.run_output
        retry_on_status = ListOfLevel(run_output.run_config.retry_on_status)
        self.cursor.execute(
            self.sql_command['create_run'],
            (
                self.run_uuid,
                run_output.event_summary,
                run_output.basepath,
                run_output.status,
                run_output.state.timestamp,
                run_output.info.run_name,
                run_output.info.project,
                run_output.info.project_stage,
                retry_on_status,
                run_output.run_config.max_retries,
                run_output.run_config.bail_on_init_failure,
                run_output.run_config.allow_phone_home,
                run_output.info.uuid,
                run_output.info.start_time,
                run_output.metadata,
                json.dumps(run_output.state.to_pod()),
                run_output.result._pod_version,  # pylint: disable=protected-access
                run_output.result._pod_serialization_version,  # pylint: disable=protected-access
            ))
        self.target_uuid = uuid.uuid4()
        target_info = context.target_info
        target_pod = target_info.to_pod()
        self.cursor.execute(
            self.sql_command['create_target'],
            (
                self.target_uuid,
                self.run_uuid,
                target_pod['target'],
                target_pod['cpus'],
                target_pod['os'],
                target_pod['os_version'],
                target_pod['hostid'],
                target_pod['hostname'],
                target_pod['abi'],
                target_pod['is_rooted'],
                # Important caveat: kernel_version is the name of the column in the Targets table
                # However, this refers to kernel_version.version, not to kernel_version as a whole
                target_pod['kernel_version'],
                target_pod['kernel_release'],
                target_info.kernel_version.sha1,
                target_info.kernel_config,
                target_pod['sched_features'],
                target_pod['page_size_kb'],
                # Android Specific
                list(target_pod.get('screen_resolution', [])),
                target_pod.get('prop'),
                target_pod.get('android_id'),
                target_pod.get('pod_version'),
                target_pod.get('pod_serialization_version'),
            ))

        # Commit cursor commands
        self.conn.commit()

    def export_job_output(self, job_output, target_info, run_output):  # pylint: disable=too-many-branches, too-many-statements, too-many-locals, unused-argument
        ''' Run once for each job to upload information that is
            updated on a job by job basis.
        '''
        job_uuid = uuid.uuid4()
        # Create a new job
        self.cursor.execute(
            self.sql_command['create_job'],
            (
                job_uuid,
                self.run_uuid,
                job_output.status,
                job_output.retry,
                job_output.label,
                job_output.id,
                job_output.iteration,
                job_output.spec.workload_name,
                job_output.metadata,
                job_output.spec._pod_version,  # pylint: disable=protected-access
                job_output.spec._pod_serialization_version,  # pylint: disable=protected-access
            ))

        for classifier in job_output.classifiers:
            classifier_uuid = uuid.uuid4()
            self.cursor.execute(
                self.sql_command['create_classifier'],
                (classifier_uuid, None, None, job_uuid, None, classifier,
                 job_output.classifiers[classifier]))
        # Update the run table and run-level parameters
        self.cursor.execute(
            self.sql_command['update_run'],
            (run_output.event_summary, run_output.status,
             run_output.state.timestamp, run_output.info.end_time, None,
             json.dumps(run_output.state.to_pod()), self.run_uuid))
        for classifier in run_output.classifiers:
            classifier_uuid = uuid.uuid4()
            self.cursor.execute(
                self.sql_command['create_classifier'],
                (classifier_uuid, None, None, None, self.run_uuid,
                 classifier, run_output.classifiers[classifier]))
        self.sql_upload_artifacts(run_output, record_in_added=True)
        self.sql_upload_metrics(run_output, record_in_added=True)
        self.sql_upload_augmentations(run_output)
        self.sql_upload_resource_getters(run_output)
        self.sql_upload_events(job_output, job_uuid=job_uuid)
        self.sql_upload_artifacts(job_output, job_uuid=job_uuid)
        self.sql_upload_metrics(job_output, job_uuid=job_uuid)
        self.sql_upload_job_augmentations(job_output, job_uuid=job_uuid)
        self.sql_upload_parameters("workload",
                                   job_output.spec.workload_parameters,
                                   job_uuid=job_uuid)
        self.sql_upload_parameters("runtime",
                                   job_output.spec.runtime_parameters,
                                   job_uuid=job_uuid)
        self.conn.commit()

    def export_run_output(self, run_output, target_info):  # pylint: disable=unused-argument, too-many-locals
        ''' A final export of the RunOutput that updates existing parameters
            and uploads ones which are only generated after jobs have run.
        '''
        if not self.cursor:  # Database did not connect correctly.
            return
        # Update the job statuses following completion of the run
        for job in run_output.jobs:
            job_id = job.id
            job_status = job.status
            self.cursor.execute(
                "UPDATE Jobs SET status=%s WHERE job_id=%s and run_oid=%s",
                (job_status, job_id, self.run_uuid))

        run_uuid = self.run_uuid
        # Update the run entry after jobs have completed
        run_info_pod = run_output.info.to_pod()
        run_state_pod = run_output.state.to_pod()
        sql_command_update_run = self.sql_command['update_run']
        self.cursor.execute(sql_command_update_run, (
            run_output.event_summary,
            run_output.status,
            run_info_pod['start_time'],
            run_info_pod['end_time'],
            run_info_pod['duration'],
            json.dumps(run_state_pod),
            run_uuid,
        ))
        self.sql_upload_events(run_output)
        self.sql_upload_artifacts(run_output, check_uniqueness=True)
        self.sql_upload_metrics(run_output, check_uniqueness=True)
        self.sql_upload_augmentations(run_output)
        self.conn.commit()

    # Upload functions for use with both jobs and runs

    def sql_upload_resource_getters(self, output_object):
        for resource_getter in output_object.run_config.resource_getters:
            resource_getter_uuid = uuid.uuid4()
            self.cursor.execute(self.sql_command['create_resource_getter'], (
                resource_getter_uuid,
                self.run_uuid,
                resource_getter,
            ))
            self.sql_upload_parameters(
                'resource_getter',
                output_object.run_config.resource_getters[resource_getter],
                owner_id=resource_getter_uuid,
            )

    def sql_upload_events(self, output_object, job_uuid=None):
        for event in output_object.events:
            event_uuid = uuid.uuid4()
            self.cursor.execute(
                self.sql_command['create_event'],
                (
                    event_uuid,
                    self.run_uuid,
                    job_uuid,
                    event.timestamp,
                    event.message,
                    event._pod_version,  # pylint: disable=protected-access
                    event._pod_serialization_version,  # pylint: disable=protected-access
                ))

    def sql_upload_job_augmentations(self, output_object, job_uuid=None):
        ''' This is a table which links the uuids of augmentations to jobs.
        Note that the augmentations table is prepopulated, leading to the necessity
        of an augmentations_already_added dictionary, which gives us the corresponding
        uuids.
        Augmentations which are prefixed by ~ are toggled off and not part of the job,
        therefore not added.
        '''
        for augmentation in output_object.spec.augmentations:
            if augmentation.startswith('~'):
                continue
            augmentation_uuid = self.augmentations_already_added[augmentation]
            job_aug_uuid = uuid.uuid4()
            self.cursor.execute(self.sql_command['create_job_aug'], (
                job_aug_uuid,
                job_uuid,
                augmentation_uuid,
            ))

    def sql_upload_augmentations(self, output_object):
        for augmentation in output_object.augmentations:
            if augmentation.startswith(
                    '~') or augmentation in self.augmentations_already_added:
                continue
            augmentation_uuid = uuid.uuid4()
            self.cursor.execute(self.sql_command['create_augmentation'], (
                augmentation_uuid,
                self.run_uuid,
                augmentation,
            ))
            self.sql_upload_parameters(
                'augmentation',
                output_object.run_config.augmentations[augmentation],
                owner_id=augmentation_uuid,
            )
            self.augmentations_already_added[augmentation] = augmentation_uuid

    def sql_upload_metrics(self,
                           output_object,
                           record_in_added=False,
                           check_uniqueness=False,
                           job_uuid=None):
        for metric in output_object.metrics:
            if metric in self.metrics_already_added and check_uniqueness:
                continue
            metric_uuid = uuid.uuid4()
            self.cursor.execute(
                self.sql_command['create_metric'],
                (
                    metric_uuid,
                    self.run_uuid,
                    job_uuid,
                    metric.name,
                    metric.value,
                    metric.units,
                    metric.lower_is_better,
                    metric._pod_version,  # pylint: disable=protected-access
                    metric._pod_serialization_version,  # pylint: disable=protected-access
                ))
            for classifier in metric.classifiers:
                classifier_uuid = uuid.uuid4()
                self.cursor.execute(self.sql_command['create_classifier'], (
                    classifier_uuid,
                    None,
                    metric_uuid,
                    None,
                    None,
                    classifier,
                    metric.classifiers[classifier],
                ))
            if record_in_added:
                self.metrics_already_added.append(metric)

    def sql_upload_artifacts(self,
                             output_object,
                             record_in_added=False,
                             check_uniqueness=False,
                             job_uuid=None):
        ''' Uploads artifacts to the database.
        record_in_added will record the artifacts added in artifacts_already_added
        check_uniqueness will ensure artifacts in artifacts_already_added do not get added again
        '''
        for artifact in output_object.artifacts:
            if artifact in self.artifacts_already_added and check_uniqueness:
                self.logger.debug(
                    'Skipping uploading {} as already added'.format(artifact))
                continue

            if artifact in self.artifacts_already_added:
                self._sql_update_artifact(artifact, output_object)
            else:
                self._sql_create_artifact(artifact, output_object,
                                          record_in_added, job_uuid)

    def sql_upload_parameters(self,
                              parameter_type,
                              parameter_dict,
                              owner_id=None,
                              job_uuid=None):
        # Note, currently no augmentation parameters are workload specific, but in the future
        # this may change
        augmentation_id = None
        resource_getter_id = None

        if parameter_type not in [
                'workload', 'resource_getter', 'augmentation', 'runtime'
        ]:
            # boot parameters are not yet implemented
            # device parameters are redundant due to the targets table
            raise NotImplementedError(
                "{} is not a valid parameter type.".format(parameter_type))

        if parameter_type == "resource_getter":
            resource_getter_id = owner_id
        elif parameter_type == "augmentation":
            augmentation_id = owner_id

        for parameter in parameter_dict:
            parameter_uuid = uuid.uuid4()
            self.cursor.execute(self.sql_command['create_parameter'], (
                parameter_uuid,
                self.run_uuid,
                job_uuid,
                augmentation_id,
                resource_getter_id,
                parameter,
                json.dumps(parameter_dict[parameter]),
                str(type(parameter_dict[parameter])),
                parameter_type,
            ))

    def connect_to_database(self):
        dsn = "dbname={} user={} password={} host={} port={}".format(
            self.dbname, self.username, self.password, self.host, self.port)
        try:
            self.conn = connect(dsn=dsn)
        except Psycopg2Error as e:
            raise OutputProcessorError(
                "Database error, if the database doesn't exist, " +
                "please use 'wa create database' to create the database: {}".
                format(e))

    def execute_sql_line_by_line(self, sql):
        cursor = self.conn.cursor()
        for line in sql.replace('\n', "").replace(";", ";\n").split("\n"):
            if line and not line.startswith('--'):
                cursor.execute(line)
        cursor.close()
        self.conn.commit()
        self.conn.reset()

    def verify_schema_versions(self):
        local_schema_version, db_schema_version = get_schema_versions(
            self.conn)
        if local_schema_version != db_schema_version:
            self.cursor.close()
            self.cursor = None
            self.conn.commit()
            self.conn.reset()
            msg = 'The current database schema is v{} however the local ' \
                  'schema version is v{}. Please update your database ' \
                  'with the create command'
            raise OutputProcessorError(
                msg.format(db_schema_version, local_schema_version))

    def _sql_write_lobject(self, source, lobject):
        with open(source) as lobj_file:
            lobj_data = lobj_file.read()
        if len(lobj_data) > 50000000:  # Notify if LO inserts larger than 50MB
            self.logger.debug("Inserting large object of size {}".format(
                len(lobj_data)))
        lobject.write(lobj_data)
        self.conn.commit()

    def _sql_update_artifact(self, artifact, output_object):
        self.logger.debug('Updating artifact: {}'.format(artifact))
        lobj = self.conn.lobject(oid=self.artifacts_already_added[artifact],
                                 mode='w')
        self._sql_write_lobject(
            os.path.join(output_object.basepath, artifact.path), lobj)

    def _sql_create_artifact(self,
                             artifact,
                             output_object,
                             record_in_added=False,
                             job_uuid=None):
        self.logger.debug('Uploading artifact: {}'.format(artifact))
        artifact_uuid = uuid.uuid4()
        lobj = self.conn.lobject()
        loid = lobj.oid
        large_object_uuid = uuid.uuid4()

        self._sql_write_lobject(
            os.path.join(output_object.basepath, artifact.path), lobj)

        self.cursor.execute(self.sql_command['create_large_object'], (
            large_object_uuid,
            loid,
        ))
        self.cursor.execute(
            self.sql_command['create_artifact'],
            (
                artifact_uuid,
                self.run_uuid,
                job_uuid,
                artifact.name,
                large_object_uuid,
                artifact.description,
                str(artifact.kind),
                artifact._pod_version,  # pylint: disable=protected-access
                artifact._pod_serialization_version,  # pylint: disable=protected-access
            ))
        for classifier in artifact.classifiers:
            classifier_uuid = uuid.uuid4()
            self.cursor.execute(self.sql_command['create_classifier'], (
                classifier_uuid,
                artifact_uuid,
                None,
                None,
                None,
                classifier,
                artifact.classifiers[classifier],
            ))
        if record_in_added:
            self.artifacts_already_added[artifact] = loid
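# --- Illustrative sketch (not part of the processor above) -----------------
# sql_upload_parameters() flattens each parameter dictionary into one row per
# key, JSON-encoding the value and recording its Python type as a string. The
# standalone helper below mirrors that row construction; the helper name and
# the exact column layout are assumptions made purely for illustration.
import json
import uuid


def parameter_rows(parameter_dict, run_uuid, parameter_type,
                   job_uuid=None, augmentation_id=None, resource_getter_id=None):
    """Build one row tuple per parameter, in the same order as 'create_parameter'."""
    rows = []
    for name, value in parameter_dict.items():
        rows.append((
            uuid.uuid4(),           # parameter uuid
            run_uuid,               # owning run
            job_uuid,               # owning job, if any
            augmentation_id,        # set only for augmentation parameters
            resource_getter_id,     # set only for resource getter parameters
            name,
            json.dumps(value),      # the value is stored as JSON text
            str(type(value)),       # the Python type is recorded alongside it
            parameter_type,
        ))
    return rows


# Example: two hypothetical runtime parameters for a run.
example_rows = parameter_rows({'governor': 'performance', 'big_frequency': 1800000},
                              run_uuid=uuid.uuid4(), parameter_type='runtime')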
Exemplo n.º 20
0
class GoogleMaps(ApkUiautoWorkload):

    name = 'googlemaps'
    description = '''
    A workload to perform standard navigation tasks with Google Maps. This workload searches
    for known locations, pans and zooms around the map, and follows driving directions
    along a route.

    To run the workload in offline mode, ``databases.tar`` and ``files.tar`` archives are required.
    In order to generate these files, Google Maps should first be operated from an
    Internet-connected environment, and a region around Cambridge, England should be downloaded
    for offline use. This region must include the landmarks used in the UIAutomator program,
    which include Cambridge train station and Corpus Christi College.

    Following this, the files of interest can be found in the ``databases`` and ``files``
    subdirectories of the ``/data/data/com.google.android.apps.maps/`` directory. The contents
    of these subdirectories can be archived into tarballs using commands such as
    ``tar -cvf databases.tar -C /path/to/databases .``. These ``databases.tar`` and ``files.tar`` archives
    should then be placed in the ``~/.workload_automation/dependencies/googlemaps`` directory on your
    local machine, creating this if it does not already exist.

    Known working APK version: 10.19.1
    '''
    package_names = ['com.google.android.apps.maps']

    parameters = [
        Parameter('offline_mode',
                  kind=bool,
                  default=False,
                  description='''
                  If set to ``True``, the workload will execute in offline mode.
                  This mode requires root and makes use of a tarball of database
                  files ``databases.tar`` and a tarball of auxiliary files ``files.tar``.
                  These tarballs are extracted directly to the application's ``databases``
                  and ``files`` directories respectively in ``/data/data/com.google.android.apps.maps/``.
                  '''),
    ]

    @property
    def requires_network(self):
        return not self.offline_mode

    @property
    def requires_rerun(self):
        # In offline mode we need to restart the application after modifying its data directory
        return self.offline_mode

    def __init__(self, target, **kwargs):
        super(GoogleMaps, self).__init__(target, **kwargs)
        if self.offline_mode:
            self.deployable_assets = ['databases.tar', 'files.tar']
            self.cleanup_assets = True

    def initialize(self, context):
        super(GoogleMaps, self).initialize(context)
        if self.offline_mode and not self.target.is_rooted:
            raise WorkloadError(
                'This workload requires root to set up Google Maps for offline usage.'
            )

    def init_resources(self, context):
        super(GoogleMaps, self).init_resources(context)
        self.gui.uiauto_params['offline_mode'] = self.offline_mode

    def setup_rerun(self):
        super(GoogleMaps, self).setup_rerun()
        package_data_dir = self.target.path.join(
            self.target.package_data_directory, self.package)
        databases_src = self.target.path.join(self.target.working_directory,
                                              'databases.tar')
        databases_dst = self.target.path.join(package_data_dir, 'databases')
        files_src = self.target.path.join(self.target.working_directory,
                                          'files.tar')
        files_dst = self.target.path.join(package_data_dir, 'files')
        owner = self.target.execute("{} stat -c '%u' {}".format(
            self.target.busybox, package_data_dir),
                                    as_root=True).strip()
        self.target.execute('{} tar -xvf {} -C {}'.format(
            self.target.busybox, databases_src, databases_dst),
                            as_root=True)
        self.target.execute('{} tar -xvf {} -C {}'.format(
            self.target.busybox, files_src, files_dst),
                            as_root=True)
        self.target.execute('{} chown -R {}:{} {}'.format(
            self.target.busybox, owner, owner, package_data_dir),
                            as_root=True)
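# --- Illustrative sketch (not part of the workload above) ------------------
# The docstring above describes packaging the application's 'databases' and
# 'files' directories into databases.tar and files.tar with a command such as
# ``tar -cvf databases.tar -C /path/to/databases .``. The helper below is a
# hedged host-side equivalent in Python; it assumes the app data has already
# been pulled from a rooted device into ``pulled_data_dir``, and the paths
# used here are illustrative assumptions.
import os
import tarfile


def pack_maps_assets(pulled_data_dir,
                     dep_dir=os.path.expanduser(
                         '~/.workload_automation/dependencies/googlemaps')):
    os.makedirs(dep_dir, exist_ok=True)
    for subdir in ('databases', 'files'):
        src = os.path.join(pulled_data_dir, subdir)
        dst = os.path.join(dep_dir, '{}.tar'.format(subdir))
        with tarfile.open(dst, 'w') as tar:
            # Roughly equivalent to: tar -cvf <subdir>.tar -C <src> .
            for entry in os.listdir(src):
                tar.add(os.path.join(src, entry), arcname=entry)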
Exemplo n.º 21
0
class ExoPlayer(ApkWorkload):
    """
    Android ExoPlayer

    ExoPlayer is the basic video player library that is used by the YouTube
    android app. The aim of this workload is to exercise ExoPlayer as a proxy
    for YouTube performance on targets where running the real YouTube app is
    not possible due to its dependencies.

    ExoPlayer sources: https://github.com/google/ExoPlayer

    The 'demo' application is used by this workload.  It can easily be built by
    loading the ExoPlayer sources into Android Studio.

    Version r2.4.0, built from commit d979469, is known to work.

    Produces a metric 'exoplayer_dropped_frames' - this is the count of frames
    that Exoplayer itself reports as dropped. This is not the same thing as the
    dropped frames reported by gfxinfo.
    """

    name = 'exoplayer'

    video_directory = os.path.join(settings.dependencies_directory, name)

    package_names = ['com.google.android.exoplayer2.demo']
    supported_versions = ['2.4', '2.5', '2.6']
    action = 'com.google.android.exoplayer.demo.action.VIEW'
    default_format = 'mov_720p'
    view = 'SurfaceView - com.google.android.exoplayer2.demo/com.google.android.exoplayer2.demo.PlayerActivity'

    parameters = [
        Parameter('version', allowed_values=supported_versions, override=True),
        Parameter('duration',
                  kind=int,
                  default=20,
                  description="""
                  Playback duration of the video file. This becomes the duration of the workload.
                  If provided, it must be shorter than the length of the media.
                  """),
        Parameter('format',
                  allowed_values=list(DOWNLOAD_URLS.keys()),
                  description="""
                  Specifies the format of the video file to play. Default is ``{}``.
                  """.format(default_format)),
        Parameter('filename',
                  description="""
                   The name of the video file to play. This can either be a path
                   to a file anywhere on your file system, or just a file name, in
                   which case the workload will look for it in ``{}``.
                   *Note*: either ``format`` or ``filename`` should be specified, but not both!
                  """.format(video_directory)),
        Parameter('force_dependency_push',
                  kind=boolean,
                  default=False,
                  description="""
                  If true, video will always be pushed to device, regardless
                  of whether the file is already on the device.  Default is ``False``.
                  """),
        Parameter('landscape',
                  kind=boolean,
                  default=False,
                  description="""
                  Configure the screen in landscape mode, otherwise ensure
                  portrait orientation by default. Default is ``False``.
                  """),
    ]

    # pylint: disable=access-member-before-definition
    def validate(self):
        if self.format and self.filename:
            raise ConfigError(
                'Either format *or* filename must be specified; but not both.')

        if not self.format and not self.filename:
            self.format = self.default_format

    def _find_host_video_file(self):
        """Pick the video file we're going to use, download it if necessary"""
        if self.filename:
            if self.filename[0] in './' or len(
                    self.filename) > 1 and self.filename[1] == ':':
                filepath = os.path.abspath(self.filename)
            else:
                filepath = os.path.join(self.video_directory, self.filename)
            if not os.path.isfile(filepath):
                raise WorkloadError('{} does not exist.'.format(filepath))
            return filepath
        else:
            # Search for files we've already downloaded
            files = []
            format_ext, format_resolution = self.format.split('_')
            for filename in os.listdir(self.video_directory):
                _, file_ext = os.path.splitext(filename)
                if file_ext == '.' + format_ext and format_resolution in filename:
                    files.append(os.path.join(self.video_directory, filename))

            if not files:
                # Download a file with the requested format
                url = DOWNLOAD_URLS[self.format]
                filename = '{}_{}'.format(format_resolution,
                                          os.path.basename(url))
                filepath = os.path.join(self.video_directory, filename)
                self.logger.info('Downloading {} to {}...'.format(
                    url, filepath))
                urllib.request.urlretrieve(url, filepath)
                return filepath
            else:
                if len(files) > 1:
                    self.logger.warning(
                        'Multiple files found for {} format. Using {}.'.format(
                            self.format, files[0]))
                    self.logger.warning(
                        'Use "filename"parameter instead of '
                        '"format" to specify a different file.')
                return files[0]

    def init_resources(self, context):  # pylint: disable=unused-argument
        # Needs to happen first, as it sets self.format, which is required by
        # _find_host_video_file
        self.validate()

        ensure_directory_exists(self.video_directory)
        self.host_video_file = self._find_host_video_file()

    def setup(self, context):
        super(ExoPlayer, self).setup(context)

        grant_app_permissions(self.target, self.package)

        self.device_video_file = self.target.path.join(
            self.target.working_directory,
            os.path.basename(self.host_video_file))
        if self.force_dependency_push or not self.target.file_exists(
                self.device_video_file):
            self.logger.info('Copying {} to device.'.format(
                self.host_video_file))
            self.target.push(self.host_video_file, self.device_video_file)

        self._original_orientation = self.target.get_rotation()
        self.target.set_rotation(1 if self.landscape else 0)

        self.play_cmd = 'am start -a {} -d "file://{}"'.format(
            self.action, self.device_video_file)

        self.monitor = self.target.get_logcat_monitor(list(REGEXPS.values()))
        self.monitor.start()

    def run(self, context):
        self.target.execute(self.play_cmd)

        self.monitor.wait_for(REGEXPS['start'])
        self.logger.info('Playing media file')

        line = self.monitor.wait_for(REGEXPS['duration'])[0]
        media_duration_s = int(
            round(float(
                re.search(REGEXPS['duration'], line).group('duration'))))

        self.logger.info(
            'Media duration is {} seconds'.format(media_duration_s))

        if self.duration > media_duration_s:
            raise ConfigError(
                "'duration' param ({}) longer than media duration ({})".format(
                    self.duration, media_duration_s))

        if self.duration:
            self.logger.info(
                'Waiting {} seconds before ending playback'.format(
                    self.duration))
            time.sleep(self.duration)
        else:
            self.logger.info(
                'Waiting for playback completion ({} seconds)'.format(
                    media_duration_s))
            self.monitor.wait_for(REGEXPS['end'],
                                  timeout=media_duration_s + 30)

    def update_output(self, context):
        regex = re.compile(REGEXPS['dropped_frames'])

        dropped_frames = 0
        for line in self.monitor.get_log():
            match = regex.match(line)
            if match:
                dropped_frames += int(match.group('count'))

        context.add_metric('exoplayer_dropped_frames',
                           dropped_frames,
                           lower_is_better=True)

    def teardown(self, context):
        super(ExoPlayer, self).teardown(context)
        self.monitor.stop()
        if self._original_orientation is not None:
            self.target.set_rotation(self._original_orientation)
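# --- Illustrative sketch (not part of the workload above) ------------------
# The module-level REGEXPS dict used by this workload is not shown in the
# listing. The snippet below sketches the dropped-frames accounting performed
# in update_output(): every matching logcat line contributes its reported
# count to the total. The pattern and the sample lines are stand-ins for
# illustration; the real ExoPlayer log format may differ.
import re

DROPPED_FRAMES_PATTERN = re.compile(r'droppedFrames \[.*?, (?P<count>\d+)\]')


def count_dropped_frames(logcat_lines):
    """Sum per-report dropped-frame counts found in a captured logcat buffer."""
    dropped = 0
    for line in logcat_lines:
        match = DROPPED_FRAMES_PATTERN.search(line)
        if match:
            dropped += int(match.group('count'))
    return dropped


sample_log = [
    'EventLogger: droppedFrames [12.43, 3]',
    'EventLogger: droppedFrames [20.01, 5]',
]
assert count_dropped_frames(sample_log) == 8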
Exemplo n.º 22
0
class RtApp(Workload):
    # pylint: disable=no-member,attribute-defined-outside-init

    name = 'rt-app'
    description = """
    A test application that simulates configurable real-time periodic load.

    rt-app is a test application that starts multiple periodic threads in order to
    simulate a real-time periodic load. It supports SCHED_OTHER, SCHED_FIFO,
    SCHED_RR as well as the AQuoSA framework and SCHED_DEADLINE.

    The load is described using JSON-like config files. Below are a couple of simple
    examples.


    Simple use case which creates a thread that repeatedly runs for 20ms and then
    sleeps for 80ms, for the 2 second duration given in the ``global`` section:

    .. code-block:: json

        {
            "tasks" : {
                "thread0" : {
                    "loop" : -1,
                    "run" :   20000,
                    "sleep" : 80000
                }
            },
            "global" : {
                "duration" : 2,
                "calibration" : "CPU0",
                "default_policy" : "SCHED_OTHER",
                "pi_enabled" : false,
                "lock_pages" : false,
                "logdir" : "./",
                "log_basename" : "rt-app1",
                "ftrace" : false,
                "gnuplot" : true,
            }
        }


    Simple use case with 2 threads that each run for 10ms and wake each other
    up in turn, until the use case is stopped with Ctrl+C:

    .. code-block:: json

        {
            "tasks" : {
                "thread0" : {
                    "loop" : -1,
                    "run" :     10000,
                    "resume" : "thread1",
                    "suspend" : "thread0"
                },
                "thread1" : {
                    "loop" : -1,
                    "run" :     10000,
                    "resume" : "thread0",
                    "suspend" : "thread1"
                }
            }
        }

    Please refer to the existing configs in ``$WA_ROOT/wa/workloads/rt_app/use_case``
    for more examples.

    The upstream version of rt-app is hosted here:

    https://github.com/scheduler-tools/rt-app

    """

    parameters = [
        Parameter('config',
                  kind=str,
                  default='taskset',
                  description='''
                  Use case configuration file to run with rt-app. This may be
                  either the name of one of the "standard" configurations included
                  with the workload, or a path to a custom JSON file provided by
                  the user. Either way, the ".json" extension is implied and will
                  be added automatically if not specified in the argument.

                  The following is the list of standard configurations currently
                  included with the workload: {}

                  '''.format(', '.join(
                      os.listdir(PACKAGED_USE_CASE_DIRECTORY)))),
        Parameter('duration',
                  kind=int,
                  description='''
                  Duration of the workload execution in seconds. If specified, this
                  will override the corresponding parameter in the JSON config.
                  '''),
        Parameter('cpus',
                  kind=cpu_mask,
                  default=0,
                  aliases=['taskset_mask'],
                  description='Constrain execution to specific CPUs.'),
        Parameter('uninstall',
                  aliases=['uninstall_on_exit'],
                  kind=bool,
                  default=False,
                  override=True,
                  description="""
                  If set to ``True``, rt-app binary will be uninstalled from the device
                  at the end of the run.
                  """),
        Parameter('force_install',
                  kind=bool,
                  default=False,
                  description="""
                  If set to ``True``, rt-app binary will always be deployed to the
                  target device at the beginning of the run, regardless of whether it
                  was already installed there.
                  """),
    ]

    @once
    def initialize(self, context):
        # initialize() runs once per run. setting a class variable to make it
        # available to other instances of the workload
        RtApp.target_working_directory = self.target.path.join(
            self.target.working_directory, 'rt-app-working')
        RtApp.host_binary = context.get_resource(Executable(
            self, self.target.abi, BINARY_NAME),
                                                 strict=False)
        RtApp.workgen_script = context.get_resource(File(self, 'workgen'))
        self.target.execute('mkdir -p {}'.format(
            self.target_working_directory))
        self._deploy_rt_app_binary_if_necessary()

    def setup(self, context):
        self.output = None
        self.log_basename = context.current_job.label
        self.host_json_config = self._load_json_config(context)
        self.config_file_on_target = self.target.path.join(
            self.target_working_directory,
            os.path.basename(self.host_json_config))
        self.target.push(self.host_json_config, self.config_file_on_target)
        self.command = '{} {}'.format(self.target_binary,
                                      self.config_file_on_target)

        time_buffer = 30
        self.timeout = self.duration + time_buffer

    def run(self, context):
        self.output = self.target.invoke(
            self.command,
            in_directory=self.target_working_directory,
            on_cpus=self.cpus and self.cpus.list() or None,
            redirect_stderr=True,
            timeout=self.timeout,
            as_root=self.target.is_rooted)

    def update_output(self, context):
        self._pull_rt_app_logs(context)
        context.output.classifiers.update(
            dict(
                duration=self.duration,
                task_count=self.task_count,
            ))

        if not self.output:
            return
        outfile = os.path.join(context.output_directory, RAW_OUTPUT_FILENAME)
        with open(outfile, 'w') as wfh:
            wfh.write(self.output)

        error_count = 0
        crit_count = 0
        for line in self.output.split('\n'):
            match = PLOAD_REGEX.search(line)
            if match:
                pload_value = match.group(1)
                pload_unit = match.group(2)
                calib_cpu_value = match.group(3)
                context.add_metric('pLoad', float(pload_value), pload_unit)
                context.add_metric('calib_cpu', float(calib_cpu_value))

            error_match = ERROR_REGEX.search(line)
            if error_match:
                error_count += 1

            crit_match = CRIT_REGEX.search(line)
            if crit_match:
                crit_count += 1

        context.add_metric('error_count', error_count, 'count')
        context.add_metric('crit_count', crit_count, 'count')

    @once
    def finalize(self, context):
        if self.uninstall:
            self.target.uninstall(self.target_binary)
        if self.cleanup_assets:
            self.target.execute('rm -rf {}'.format(
                self.target_working_directory))

    def _deploy_rt_app_binary_if_necessary(self):
        # called from initialize() so gets invoked once per run
        RtApp.target_binary = self.target.get_installed("rt-app")
        if self.force_install or not RtApp.target_binary:
            if not self.host_binary:
                message = '''rt-app is not installed on the target and could not be
                             found in workload resources'''
                raise ResourceError(message)
            RtApp.target_binary = self.target.install(self.host_binary)

    def _load_json_config(self, context):
        user_config_file = self._get_raw_json_config(context)
        config_file = self._generate_workgen_config(user_config_file,
                                                    context.output_directory)
        with open(config_file) as fh:
            try:
                config_data = json.load(fh, object_pairs_hook=OrderedDict)
            except ValueError:
                # We were not able to parse the JSON file. Raise an informative error.
                msg = "Failed to parse {}. Please make sure it is valid JSON."
                raise ConfigError(msg.format(user_config_file))

        self._update_rt_app_config(config_data)
        self.duration = config_data['global'].get('duration', 0)
        self.task_count = len(config_data.get('tasks', []))
        with open(config_file, 'w') as wfh:
            json.dump(config_data, wfh, indent=4)
        return config_file

    def _get_raw_json_config(self, resolver):
        if os.path.splitext(self.config)[1] != '.json':
            self.config += '.json'
        if os.path.isfile(self.config):
            return os.path.abspath(self.config)
        partial_path = os.path.join('use_cases', self.config)
        return resolver.get(File(self, partial_path))

    def _generate_workgen_config(self, user_file, output_directory):
        output_file = os.path.join(output_directory, 'unkind.json')
        # use workgen dry run option to generate a use case
        # file with proper JSON grammar on host first
        try:
            check_output('python3 {} -d -o {} {}'.format(
                self.workgen_script, output_file, user_file),
                         shell=True)
        except CalledProcessError as e:
            message = 'Could not generate config using workgen, got "{}"'
            raise WorkloadError(message.format(e))
        return output_file

    def _update_rt_app_config(self, config_data):
        config_data['global'] = config_data.get('global', {})
        config_data['global']['logdir'] = self.target_working_directory
        config_data['global']['log_basename'] = self.log_basename
        if self.duration is not None:
            config_data['global']['duration'] = self.duration

    def _pull_rt_app_logs(self, context):
        tar_command = '{} tar czf {}/{} -C {} .'.format(
            self.target.busybox, self.target_working_directory,
            TARBALL_FILENAME, self.target_working_directory)
        self.target.execute(tar_command, timeout=300)
        target_path = self.target.path.join(self.target_working_directory,
                                            TARBALL_FILENAME)
        host_path = os.path.join(context.output_directory, TARBALL_FILENAME)
        self.target.pull(target_path, host_path)
        with tarfile.open(host_path, 'r:gz') as tf:
            tf.extractall(context.output_directory)
        os.remove(host_path)
        self.target.execute('rm -rf {}/*'.format(
            self.target_working_directory))
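# --- Illustrative sketch (not part of the workload above) ------------------
# _load_json_config() and _update_rt_app_config() load an rt-app use case,
# override its "global" section, and derive the duration used for the run
# timeout in setup(). The standalone helper below condenses that flow; the
# function name is an assumption, while the overridden keys and the 30-second
# buffer mirror the code above.
import json
from collections import OrderedDict


def prepare_rt_app_config(path, duration, logdir, log_basename):
    with open(path) as fh:
        config = json.load(fh, object_pairs_hook=OrderedDict)
    config['global'] = config.get('global', OrderedDict())
    config['global']['logdir'] = logdir
    config['global']['log_basename'] = log_basename
    if duration is not None:
        config['global']['duration'] = duration  # WA parameter overrides the JSON
    timeout = config['global'].get('duration', 0) + 30  # time_buffer in setup()
    return config, timeout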
Exemplo n.º 23
0
class FpsInstrument(Instrument):

    name = 'fps'
    description = """
    Measures Frames Per Second (FPS) and associated metrics for a workload.

    .. note:: This instrument depends on pandas Python library (which is not part of standard
              WA dependencies), so you will need to install that first, before you can use it.

    Android L and below use SurfaceFlinger to calculate the FPS data.
    Android M and above use gfxinfo to calculate the FPS data.

    SurfaceFlinger:
    The view is specified by the workload as ``view`` attribute. This defaults
    to ``'SurfaceView'`` for game workloads, and ``None`` for non-game
    workloads (as FPS measurement usually doesn't make sense for them).
    Individual workloads may override this.

    gfxinfo:
    The view is specified by the workload as ``package`` attribute.
    This is because gfxinfo already processes for all views in a package.

    """

    parameters = [
        Parameter('drop_threshold',
                  kind=numeric,
                  default=5,
                  description="""
                  Data points below this FPS will be dropped as they do not
                  constitute "real" gameplay. The assumption being that while
                  actually running, the FPS in the game will not drop below X
                  frames per second, except on loading screens, menus, etc,
                  which should not contribute to FPS calculation.
                  """),
        Parameter('keep_raw',
                  kind=bool,
                  default=False,
                  description="""
                  If set to ``True``, this will keep the raw dumpsys output in
                  the results directory (this is mainly used for debugging).
                  Note: frames.csv with the collected frames data will always be
                  generated regardless of this setting.
                   """),
        Parameter('crash_threshold',
                  kind=float,
                  default=0.7,
                  description="""
                  Specifies the threshold used to decide whether a
                  measured/expected frames ratio indicates a content crash.
                  E.g. a value of ``0.75`` means that if the number of actual
                  frames counted is more than a quarter lower than expected, it
                  will be treated as a content crash.

                  If set to zero, no crash check will be performed.
                  """),
        Parameter('period',
                  kind=float,
                  default=2,
                  constraint=lambda x: x > 0,
                  description="""
                  Specifies the time period between polling frame data in
                  seconds when collecting frame data. Using a lower value
                  improves the granularity of timings when recording actions
                  that take a short time to complete.  Note, this will produce
                  duplicate frame data in the raw dumpsys output, however, this
                  is filtered out in frames.csv.  It may also affect the
                  overall load on the system.

                  The default value of 2 seconds corresponds with the
                  NUM_FRAME_RECORDS in
                  android/services/surfaceflinger/FrameTracker.h (as of the
                  time of writing currently 128) and a frame rate of 60 fps
                  that is applicable to most devices.
                  """),
        Parameter('force_surfaceflinger',
                  kind=bool,
                  default=False,
                  description="""
                  By default, the method to capture fps data is based on
                  Android version.  If this is set to true, force the
                  instrument to use the SurfaceFlinger method regardless of its
                  Android version.
                  """),
    ]

    def __init__(self, target, **kwargs):
        super(FpsInstrument, self).__init__(target, **kwargs)
        self.collector = None
        self.processor = None
        self._is_enabled = None

    def setup(self, context):
        use_gfxinfo = self.target.get_sdk_version(
        ) >= 23 and not self.force_surfaceflinger
        if use_gfxinfo:
            collector_target_attr = 'package'
        else:
            collector_target_attr = 'view'
        collector_target = getattr(context.workload, collector_target_attr,
                                   None)

        if not collector_target:
            self._is_enabled = False
            msg = 'Workload {} does not define a {}; disabling frame collection and FPS evaluation.'
            self.logger.info(
                msg.format(context.workload.name, collector_target_attr))
            return

        self._is_enabled = True
        if use_gfxinfo:
            self.collector = GfxInfoFramesInstrument(self.target,
                                                     collector_target,
                                                     self.period)
            self.processor = DerivedGfxInfoStats(self.drop_threshold,
                                                 filename='fps.csv')
        else:
            self.collector = SurfaceFlingerFramesInstrument(
                self.target, collector_target, self.period)
            self.processor = DerivedSurfaceFlingerStats(self.drop_threshold,
                                                        filename='fps.csv')
        self.collector.reset()

    def start(self, context):  # pylint: disable=unused-argument
        if not self._is_enabled:
            return
        self.collector.start()

    def stop(self, context):  # pylint: disable=unused-argument
        if not self._is_enabled:
            return
        self.collector.stop()

    def update_output(self, context):
        if not self._is_enabled:
            return
        outpath = os.path.join(context.output_directory, 'frames.csv')
        frames_csv = self.collector.get_data(outpath)
        raw_output = self.collector.get_raw()

        processed = self.processor.process(frames_csv)
        processed.extend(self.processor.process_raw(*raw_output))
        fps, frame_count, fps_csv = processed[:3]
        rest = processed[3:]

        context.add_metric(fps.name, fps.value, fps.units)
        context.add_metric(frame_count.name, frame_count.value,
                           frame_count.units)
        context.add_artifact('frames', frames_csv.path, kind='raw')
        context.add_artifact('fps', fps_csv.path, kind='data')
        for metric in rest:
            context.add_metric(metric.name,
                               metric.value,
                               metric.units,
                               lower_is_better=True)

        if not self.keep_raw:
            for entry in raw_output:
                if os.path.isdir(entry):
                    shutil.rmtree(entry)
                elif os.path.isfile(entry):
                    os.remove(entry)

        if not frame_count.value:
            context.add_event('Could not find frames data in gfxinfo output')
            context.set_status('PARTIAL')

        self.check_for_crash(context, fps.value, frame_count.value,
                             context.current_job.run_time.total_seconds())

    def check_for_crash(self, context, fps, frames, exec_time):
        if not self.crash_threshold:
            return
        self.logger.debug('Checking for crashed content.')
        if all([exec_time, fps, frames]):
            expected_frames = fps * exec_time
            ratio = frames / expected_frames
            self.logger.debug('actual/expected frames: {:.2}'.format(ratio))
            if ratio < self.crash_threshold:
                msg = 'Content for {} appears to have crashed.\n'.format(
                    context.current_job.spec.label)
                msg += 'Content crash detected (actual/expected frames: {:.2}).'.format(
                    ratio)
                raise WorkloadError(msg)
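# --- Illustrative sketch (not part of the instrument above) ----------------
# check_for_crash() compares the counted frames against the count expected
# from the average FPS and the job's execution time; a ratio below
# crash_threshold is treated as a content crash. The numbers below are made
# up purely to illustrate the arithmetic.
def content_crashed(fps, frames, exec_time, crash_threshold=0.7):
    if not crash_threshold or not all([exec_time, fps, frames]):
        return False
    expected_frames = fps * exec_time
    return frames / expected_frames < crash_threshold


# 55 fps over a 60 s job should have produced ~3300 frames; only 1800 were
# counted, so the ratio (~0.55) falls below the 0.7 threshold.
assert content_crashed(fps=55, frames=1800, exec_time=60) is True
assert content_crashed(fps=55, frames=3100, exec_time=60) is False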
Exemplo n.º 24
0
class Filer(ResourceGetter):

    name = 'filer'
    description = """
    Finds resources on a (locally mounted) remote filer and caches them
    locally.

    This assumes that the filer is mounted on the local machine (e.g. as a
    samba share).

    """
    parameters = [
        Parameter('remote_path',
                  global_alias='remote_assets_path',
                  default='',
                  description="""
                  Path, on the local system, where the assets are located.
                  """),
        Parameter('always_fetch',
                  kind=boolean,
                  default=False,
                  global_alias='always_fetch_remote_assets',
                  description="""
                  If ``True``, will always attempt to fetch assets from the
                  remote, even if a local cached copy is available.
                  """),
    ]

    def register(self, resolver):
        resolver.register(self.get, SourcePriority.lan)

    def get(self, resource):
        if resource.owner:
            remote_path = os.path.join(self.remote_path, resource.owner.name)
            local_path = os.path.join(settings.dependencies_directory,
                                      '__filer',
                                      resource.owner.dependencies_directory)
            return self.try_get_resource(resource, remote_path, local_path)
        else:  # No owner
            result = None
            for entry in os.listdir(self.remote_path):
                remote_path = os.path.join(self.remote_path, entry)
                local_path = os.path.join(settings.dependencies_directory,
                                          '__filer',
                                          settings.dependencies_directory,
                                          entry)
                result = self.try_get_resource(resource, remote_path,
                                               local_path)
                if result:
                    break
            return result

    def try_get_resource(self, resource, remote_path, local_path):
        if not self.always_fetch:
            result = get_from_location(local_path, resource)
            if result:
                return result
        if not os.path.exists(local_path):
            return None
        if os.path.exists(remote_path):
            # Didn't find it cached locally; now check the remote
            result = get_from_location(remote_path, resource)
            if not result:
                return result
        else:  # remote path does not exist
            return None
        # Found it remotely, cache locally, then return it
        local_full_path = os.path.join(_d(local_path),
                                       os.path.basename(result))
        self.logger.debug('cp {} {}'.format(result, local_full_path))
        shutil.copy(result, local_full_path)
        return result
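# --- Illustrative sketch (not part of the getter above) --------------------
# try_get_resource() implements a simple caching policy: look in the local
# cache first (unless always_fetch is set), then fall back to the remote copy
# and cache it alongside the other local dependencies. The helper below
# sketches that policy with a stand-in get_from_location() that just matches
# a file name, rather than WA's real resource resolution.
import os
import shutil


def get_from_location(location, filename):
    candidate = os.path.join(location, filename)
    return candidate if os.path.isfile(candidate) else None


def fetch(filename, remote_path, local_path, always_fetch=False):
    if not always_fetch:
        cached = get_from_location(local_path, filename)
        if cached:
            return cached
    found = get_from_location(remote_path, filename)
    if not found:
        return None
    os.makedirs(local_path, exist_ok=True)
    shutil.copy(found, os.path.join(local_path, os.path.basename(found)))  # cache for next time
    return found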
Exemplo n.º 25
0
class SysfsExtractor(Instrument):

    name = 'sysfs_extractor'
    description = """
    Collects the contents of a set of directories, before and after workload execution,
    and diffs the results.

    """

    mount_command = 'mount -t tmpfs -o size={} tmpfs {}'
    extract_timeout = 30
    tarname = 'sysfs.tar.gz'
    DEVICE_PATH = 0
    BEFORE_PATH = 1
    AFTER_PATH = 2
    DIFF_PATH = 3

    parameters = [
        Parameter(
            'paths',
            kind=list_of_strings,
            mandatory=True,
            description=
            """A list of paths to be pulled from the device. These could be directories
                                as well as files.""",
            global_alias='sysfs_extract_dirs'),
        Parameter('use_tmpfs',
                  kind=bool,
                  default=None,
                  description="""
                  Specifies whether tmpfs should be used to cache sysfile trees and then pull them down
                  as a tarball. This is significantly faster than just copying the directory trees from
                  the device directly, but requires root and may not work on all devices. Defaults to
                  ``True`` if the device is rooted and ``False`` if it is not.
                  """),
        Parameter(
            'tmpfs_mount_point',
            default=None,
            description=
            """Mount point for tmpfs partition used to store snapshots of paths."""
        ),
        Parameter('tmpfs_size',
                  default='32m',
                  description="""Size of the tempfs partition."""),
    ]

    def initialize(self, context):
        if not self.target.is_rooted and self.use_tmpfs:  # pylint: disable=access-member-before-definition
            raise ConfigError(
                'use_tmpfs must be False for an unrooted device.')
        elif self.use_tmpfs is None:  # pylint: disable=access-member-before-definition
            self.use_tmpfs = self.target.is_rooted

        if self.use_tmpfs:
            self.on_device_before = self.target.path.join(
                self.tmpfs_mount_point, 'before')
            self.on_device_after = self.target.path.join(
                self.tmpfs_mount_point, 'after')

            if not self.target.file_exists(self.tmpfs_mount_point):
                self.target.execute('mkdir -p {}'.format(
                    self.tmpfs_mount_point),
                                    as_root=True)
                self.target.execute(self.mount_command.format(
                    self.tmpfs_size, self.tmpfs_mount_point),
                                    as_root=True)

    def setup(self, context):
        before_dirs = [
            _d(
                os.path.join(context.output_directory, 'before',
                             self._local_dir(d))) for d in self.paths
        ]
        after_dirs = [
            _d(
                os.path.join(context.output_directory, 'after',
                             self._local_dir(d))) for d in self.paths
        ]
        diff_dirs = [
            _d(
                os.path.join(context.output_directory, 'diff',
                             self._local_dir(d))) for d in self.paths
        ]
        self.device_and_host_paths = list(
            zip(self.paths, before_dirs, after_dirs, diff_dirs))

        if self.use_tmpfs:
            for d in self.paths:
                before_dir = self.target.path.join(
                    self.on_device_before,
                    self.target.path.dirname(as_relative(d)))
                after_dir = self.target.path.join(
                    self.on_device_after,
                    self.target.path.dirname(as_relative(d)))
                if self.target.file_exists(before_dir):
                    self.target.execute('rm -rf  {}'.format(before_dir),
                                        as_root=True)
                self.target.execute('mkdir -p {}'.format(before_dir),
                                    as_root=True)
                if self.target.file_exists(after_dir):
                    self.target.execute('rm -rf  {}'.format(after_dir),
                                        as_root=True)
                self.target.execute('mkdir -p {}'.format(after_dir),
                                    as_root=True)

    @slow
    def start(self, context):
        if self.use_tmpfs:
            for d in self.paths:
                dest_dir = self.target.path.join(self.on_device_before,
                                                 as_relative(d))
                if '*' in dest_dir:
                    dest_dir = self.target.path.dirname(dest_dir)
                self.target.execute('{} cp -Hr {} {}'.format(
                    self.target.busybox, d, dest_dir),
                                    as_root=True,
                                    check_exit_code=False)
        else:  # not rooted
            for dev_dir, before_dir, _, _ in self.device_and_host_paths:
                self.target.pull(dev_dir, before_dir)

    @slow
    def stop(self, context):
        if self.use_tmpfs:
            for d in self.paths:
                dest_dir = self.target.path.join(self.on_device_after,
                                                 as_relative(d))
                if '*' in dest_dir:
                    dest_dir = self.target.path.dirname(dest_dir)
                self.target.execute('{} cp -Hr {} {}'.format(
                    self.target.busybox, d, dest_dir),
                                    as_root=True,
                                    check_exit_code=False)
        else:  # not using tmpfs
            for dev_dir, _, after_dir, _ in self.device_and_host_paths:
                self.target.pull(dev_dir, after_dir)

    def update_output(self, context):
        if self.use_tmpfs:
            on_device_tarball = self.target.path.join(
                self.target.working_directory, self.tarname)
            on_host_tarball = self.target.path.join(context.output_directory,
                                                    self.tarname)
            self.target.execute('{} tar czf {} -C {} .'.format(
                self.target.busybox, on_device_tarball,
                self.tmpfs_mount_point),
                                as_root=True)
            self.target.execute('chmod 0777 {}'.format(on_device_tarball),
                                as_root=True)
            self.target.pull(on_device_tarball, on_host_tarball)
            with tarfile.open(on_host_tarball, 'r:gz') as tf:
                tf.extractall(context.output_directory)
            self.target.remove(on_device_tarball)
            os.remove(on_host_tarball)

        for paths in list(self.device_and_host_paths):  # iterate over a copy; entries may be removed below
            after_dir = paths[self.AFTER_PATH]
            dev_dir = paths[self.DEVICE_PATH].strip(
                '*')  # remove potential trailing '*'
            if (not os.listdir(after_dir) and self.target.file_exists(dev_dir)
                    and self.target.list_directory(dev_dir)):
                self.logger.error(
                    'sysfs files were not pulled from the device.')
                self.device_and_host_paths.remove(
                    paths)  # Path is removed to skip diffing it
        for dev_dir, before_dir, after_dir, diff_dir in self.device_and_host_paths:
            diff_sysfs_dirs(before_dir, after_dir, diff_dir)
            context.add_artifact('{} [before]'.format(dev_dir),
                                 before_dir,
                                 kind='data',
                                 classifiers={'stage': 'before'})
            context.add_artifact('{} [after]'.format(dev_dir),
                                 after_dir,
                                 kind='data',
                                 classifiers={'stage': 'after'})
            context.add_artifact('{} [diff]'.format(dev_dir),
                                 diff_dir,
                                 kind='data',
                                 classifiers={'stage': 'diff'})

    def teardown(self, context):
        self._one_time_setup_done = []

    def finalize(self, context):
        if self.use_tmpfs:
            try:
                self.target.execute('umount {}'.format(self.tmpfs_mount_point),
                                    as_root=True)
            except (TargetError, CalledProcessError):
                # assume a directory but not mount point
                pass
            self.target.execute('rm -rf {}'.format(self.tmpfs_mount_point),
                                as_root=True,
                                check_exit_code=False)

    def validate(self):
        if not self.tmpfs_mount_point:  # pylint: disable=access-member-before-definition
            self.tmpfs_mount_point = self.target.get_workpath('temp-fs')

    def _local_dir(self, directory):
        return os.path.dirname(
            as_relative(directory).replace(self.target.path.sep, os.sep))
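# --- Illustrative sketch (not part of the instrument above) ----------------
# diff_sysfs_dirs() used in update_output() comes from WA's utilities and is
# not shown in this listing. The helper below illustrates the general idea
# only: walk the 'before' snapshot and, for every file that also exists in
# 'after', write a unified diff into the corresponding 'diff' tree.
import difflib
import os


def diff_trees(before_dir, after_dir, diff_dir):
    for root, _, files in os.walk(before_dir):
        rel = os.path.relpath(root, before_dir)
        for name in files:
            after_file = os.path.join(after_dir, rel, name)
            if not os.path.isfile(after_file):
                continue
            with open(os.path.join(root, name)) as fh:
                before_lines = fh.readlines()
            with open(after_file) as fh:
                after_lines = fh.readlines()
            out_dir = os.path.join(diff_dir, rel)
            os.makedirs(out_dir, exist_ok=True)
            with open(os.path.join(out_dir, name), 'w') as wfh:
                wfh.writelines(difflib.unified_diff(
                    before_lines, after_lines, fromfile='before', tofile='after'))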
Exemplo n.º 26
0
class PerfInstrument(Instrument):

    name = 'perf'
    description = """
    Perf is a Linux profiling tool that uses performance counters.
    Simpleperf is an Android profiling tool with performance counters.

    It is highly recommended to use perf_type = simpleperf when using this instrument
    on Android devices, since it recognises Android symbols in record mode and is much more stable
    when reporting record .data files. For more information, see the simpleperf documentation at:
    https://android.googlesource.com/platform/system/extras/+/master/simpleperf/doc/README.md

    Performance counters are CPU hardware registers that count hardware events
    such as instructions executed, cache-misses suffered, or branches
    mispredicted. They form a basis for profiling applications to trace dynamic
    control flow and identify hotspots.

    perf accepts options and events. If no option is given the default '-a' is
    used. For events, the default events for perf are migrations and cs. The default
    events for simpleperf are raw-cpu-cycles, raw-l1-dcache, raw-l1-dcache-refill, raw-instructions-retired.
    They both can be specified in the config file.

    Events must be provided as a list, for example ::

        (for perf_type = perf ) perf_events = ['migrations', 'cs']
        (for perf_type = simpleperf) perf_events = ['raw-cpu-cycles', 'raw-l1-dcache']


    Events can be obtained by typing the following in the command line on the
    device ::

        perf list
        simpleperf list

    Options, on the other hand, are provided as a single string, for example ::

        perf_options = '-a -i'
        perf_options = '--app com.adobe.reader'

    Options can be obtained by running the following in the command line ::

        man perf-stat
    """

    parameters = [
        Parameter('perf_type',
                  kind=str,
                  allowed_values=['perf', 'simpleperf'],
                  default='perf',
                  global_alias='perf_type',
                  description="""Specifies which type of perf binaries
                  to install. Use simpleperf for collecting perf data on android systems."""
                  ),
        Parameter(
            'command',
            kind=str,
            default='stat',
            allowed_values=['stat', 'record'],
            global_alias='perf_command',
            description="""Specifies which perf command to use. If in record mode
                  report command will also be executed and results pulled from target along with raw data
                  file"""),
        Parameter('events',
                  kind=list_of_strs,
                  global_alias='perf_events',
                  description="""Specifies the events to be counted."""),
        Parameter('optionstring',
                  kind=list_or_string,
                  default='-a',
                  global_alias='perf_options',
                  description=
                  """Specifies options to be used for the perf command. This
                  may be a list of option strings, in which case, multiple instances of perf
                  will be kicked off -- one for each option string. This may be used to e.g.
                  collected different events from different big.LITTLE clusters. In order to
                  profile a particular application process for android with simpleperf use
                  the --app option e.g. --app com.adobe.reader
                  """),
        Parameter(
            'report_option_string',
            kind=str,
            global_alias='perf_report_options',
            default=None,
            description=
            """Specifies options to be used to gather report when record command
                  is used. It's highly recommended to use perf_type simpleperf when running on
                  android devices as reporting options are unstable with perf"""
        ),
        Parameter(
            'labels',
            kind=list_of_strs,
            default=None,
            global_alias='perf_labels',
            description=
            """Provides labels for perf/simpleperf output for each optionstring.
                  If specified, the number of labels must match the number of ``optionstring``\ s.
                  """),
        Parameter('force_install',
                  kind=bool,
                  default=False,
                  description="""
                  always install perf binary even if perf is already present on the device.
                  """),
    ]

    def __init__(self, target, **kwargs):
        super(PerfInstrument, self).__init__(target, **kwargs)
        self.collector = None
        self.outdir = None

    def initialize(self, context):
        self.collector = PerfCollector(self.target, self.perf_type,
                                       self.command, self.events,
                                       self.optionstring,
                                       self.report_option_string, self.labels,
                                       self.force_install)

    def setup(self, context):
        self.outdir = os.path.join(context.output_directory, self.perf_type)
        self.collector.set_output(self.outdir)
        self.collector.reset()

    def start(self, context):
        self.collector.start()

    def stop(self, context):
        self.collector.stop()

    def update_output(self, context):
        self.logger.info('Extracting reports from target...')
        self.collector.get_data()

        if self.perf_type == 'perf':
            self._process_perf_output(context)
        else:
            self._process_simpleperf_output(context)

    def teardown(self, context):
        self.collector.reset()

    def _process_perf_output(self, context):
        if self.command == 'stat':
            self._process_perf_stat_output(context)
        elif self.command == 'record':
            self._process_perf_record_output(context)

    def _process_simpleperf_output(self, context):
        if self.command == 'stat':
            self._process_simpleperf_stat_output(context)
        elif self.command == 'record':
            self._process_simpleperf_record_output(context)

    def _process_perf_stat_output(self, context):
        for host_file in os.listdir(self.outdir):
            label = host_file.split('.out')[0]
            host_file_path = os.path.join(self.outdir, host_file)
            context.add_artifact(label, host_file_path, 'raw')
            with open(host_file_path) as fh:
                in_results_section = False
                for line in fh:
                    if 'Performance counter stats' in line:
                        in_results_section = True
                        next(fh)  # skip the following blank line
                    if not in_results_section:
                        continue
                    if not line.strip():  # blank line
                        in_results_section = False
                        break
                    else:
                        self._add_perf_stat_metric(line, label, context)

    @staticmethod
    def _add_perf_stat_metric(line, label, context):
        line = line.split('#')[0]  # strip any trailing comment
        match = PERF_COUNT_REGEX.search(line)
        if not match:
            return
        classifiers = {}
        cpu = match.group(1)
        if cpu is not None:
            classifiers['cpu'] = int(cpu.replace('CPU', ''))
        count = int(match.group(2))
        metric = '{}_{}'.format(label, match.group(3))
        context.add_metric(metric, count, classifiers=classifiers)

    def _process_perf_record_output(self, context):
        for host_file in os.listdir(self.outdir):
            label, ext = os.path.splitext(host_file)
            context.add_artifact(label, os.path.join(self.outdir, host_file),
                                 'raw')
            column_headers = []
            column_header_indeces = []
            event_type = ''
            if ext == '.rpt':
                with open(os.path.join(self.outdir, host_file)) as fh:
                    for line in fh:
                        words = line.split()
                        if not words:
                            continue
                        event_type = self._get_report_event_type(
                            words, event_type)
                        column_headers = self._get_report_column_headers(
                            column_headers, words, 'perf')
                        for column_header in column_headers:
                            column_header_indeces.append(
                                line.find(column_header))
                        self._add_report_metric(column_headers,
                                                column_header_indeces, line,
                                                words, context, event_type,
                                                label)

    @staticmethod
    def _get_report_event_type(words, event_type):
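        # perf report headers look roughly like "# Samples: 32K of event
        # 'cycles'"; the event name is the sixth word, wrapped in quotes.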
        if words[0] != '#':
            return event_type
        if len(words) == 6 and words[4] == 'event':
            event_type = words[5]
            event_type = event_type.strip("'")
        return event_type

    def _process_simpleperf_stat_output(self, context):
        labels = []
        for host_file in os.listdir(self.outdir):
            labels.append(host_file.split('.out')[0])
        for opts, label in zip(self.optionstring, labels):
            stat_file = os.path.join(self.outdir, '{}{}'.format(label, '.out'))
            if '--csv' in opts:
                self._process_simpleperf_stat_from_csv(stat_file, context,
                                                       label)
            else:
                self._process_simpleperf_stat_from_raw(stat_file, context,
                                                       label)

    @staticmethod
    def _process_simpleperf_stat_from_csv(stat_file, context, label):
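        # simpleperf's --csv output has a header row followed by rows roughly
        # of the form "<count>,<event-name>,...,(<scaled>%)"; the scaled-from
        # percentage sits in the second-to-last column.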
        with open(stat_file) as csv_file:
            readCSV = csv.reader(csv_file, delimiter=',')
            line_num = 0
            for row in readCSV:
                if line_num > 0 and 'Total test time' not in row:
                    classifiers = {
                        'scaled from(%)':
                        row[len(row) - 2].replace('(', '').replace(')',
                                                                   '').replace(
                                                                       '%', '')
                    }
                    context.add_metric('{}_{}'.format(label, row[1]),
                                       row[0],
                                       'count',
                                       classifiers=classifiers)
                line_num += 1

    @staticmethod
    def _process_simpleperf_stat_from_raw(stat_file, context, label):
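        # Raw (non-CSV) simpleperf stat lines look roughly like
        # "1,234,567  cpu-cycles  # 1.23 GHz  (100%)": count, event name, then
        # a comment holding the scaled-from percentage in parentheses.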
        with open(stat_file) as fh:
            for line in fh:
                if '#' in line:
                    tmp_line = line.split('#')[0]
                    tmp_line = tmp_line.strip()
                    count, metric = tmp_line.split(' ')[0], tmp_line.split(
                        ' ')[2]
                    count = int(count.replace(',', ''))
                    scaled_percentage = line.split('(')[1].strip().replace(
                        ')', '').replace('%', '')
                    scaled_percentage = int(scaled_percentage)
                    metric = '{}_{}'.format(label, metric)
                    context.add_metric(
                        metric,
                        count,
                        'count',
                        classifiers={'scaled from(%)': scaled_percentage})

    def _process_simpleperf_record_output(self, context):
        for host_file in os.listdir(self.outdir):
            label, ext = os.path.splitext(host_file)
            context.add_artifact(label, os.path.join(self.outdir, host_file),
                                 'raw')
            if ext != '.rpt':
                continue
            column_headers = []
            column_header_indeces = []
            event_type = ''
            with open(os.path.join(self.outdir, host_file)) as fh:
                for line in fh:
                    words = line.split()
                    if not words:
                        continue
                    if words[0] == 'Event:':
                        event_type = words[1]
                    column_headers = self._get_report_column_headers(
                        column_headers, words, 'simpleperf')
                    for column_header in column_headers:
                        column_header_indeces.append(line.find(column_header))
                    self._add_report_metric(column_headers,
                                            column_header_indeces, line, words,
                                            context, event_type, label)

    @staticmethod
    def _get_report_column_headers(column_headers, words, perf_type):
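        # Report column headers look roughly like
        # "# Overhead  Command  Shared Object  Symbol"; perf prefixes the line
        # with '#', simpleperf does not.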
        if 'Overhead' not in words:
            return column_headers
        if perf_type == 'perf':
            words.remove('#')
        column_headers = words
        # Concatenate the 'Shared Object' header words into a single column name
        if 'Shared' in column_headers:
            shared_index = column_headers.index('Shared')
            column_headers[shared_index:shared_index + 2] = [
                '{} {}'.format(column_headers[shared_index],
                               column_headers[shared_index + 1])
            ]
        return column_headers

    @staticmethod
    def _add_report_metric(column_headers, column_header_indeces, line, words,
                           context, event_type, label):
        if '%' not in words[0]:
            return
        classifiers = {}
        for i in range(1, len(column_headers)):
            classifiers[column_headers[i]] = line[
                column_header_indeces[i]:column_header_indeces[i + 1]].strip()

        context.add_metric('{}_{}_Overhead'.format(label, event_type),
                           numeric(words[0].strip('%')),
                           'percent',
                           classifiers=classifiers)
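
The stat parsing above relies on ``PERF_COUNT_REGEX``, which is defined
elsewhere in the module and not shown in this excerpt. The snippet below is a
minimal sketch, using an approximated pattern (not necessarily the one WA
actually uses), of how a per-CPU count can be pulled out of a
``perf stat -a -A`` line:

import re

# Approximation only -- not necessarily the real PERF_COUNT_REGEX.
COUNT_REGEX = re.compile(r'^\s*(CPU\d+)?\s*([\d,]+)\s+(\S+)')

sample = 'CPU0            1,235,017      cycles      #    0.536 GHz'
line = sample.split('#')[0]   # drop the trailing comment, as the instrument does
match = COUNT_REGEX.search(line)
if match:
    cpu = match.group(1)                          # 'CPU0', or None on aggregate lines
    count = int(match.group(2).replace(',', ''))  # 1235017
    event = match.group(3)                        # 'cycles'
    print(cpu, count, event)
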
Exemplo n.º 27
0
class Http(ResourceGetter):

    name = 'http'
    description = """
    Downloads resources from a server based on an index fetched from the
    specified URL.

    Given a URL, this will try to fetch ``<URL>/index.json``. The index file
    maps extension names to a list of corresponding asset descriptions. Each
    asset description contains a path (relative to the base URL) of the
    resource and a SHA256 hash, so that this Getter can verify whether the
    resource on the remote has changed.

    For example, let's assume we want to get the APK file for workload "foo",
    and that assets are hosted at ``http://example.com/assets``. This Getter
    will first try to download ``http://example.com/assets/index.json``. The
    index file may contain something like ::

        {
            "foo": [
                {
                    "path": "foo-app.apk",
                    "sha256": "b14530bb47e04ed655ac5e80e69beaa61c2020450e18638f54384332dffebe86"
                },
                {
                    "path": "subdir/some-other-asset.file",
                    "sha256": "48d9050e9802246d820625717b72f1c2ba431904b8484ca39befd68d1dbedfff"
                }
            ]
        }

    This Getter will look through the list of assets for "foo" (in this case,
    two) and check the paths until it finds one matching the resource (in this
    case, "foo-app.apk"). Finally, it will try to download that file relative
    to the base URL and extension name (in this case,
    "http://example.com/assets/foo/foo-app.apk"). The downloaded version will
    be cached locally, so that in the future, the getter will check the SHA256
    hash of the local file against the one advertised inside index.json, and
    provided that hasn't changed, it won't try to download the file again.

    """
    parameters = [
        Parameter('url',
                  global_alias='remote_assets_url',
                  description="""
                  URL of the index file for assets on an HTTP server.
                  """),
        Parameter('username',
                  description="""
                  User name for authenticating with assets URL
                  """),
        Parameter('password',
                  description="""
                  Password for authenticating with assets URL
                  """),
        Parameter('always_fetch',
                  kind=boolean,
                  default=False,
                  global_alias='always_fetch_remote_assets',
                  description="""
                  If ``True``, will always attempt to fetch assets from the
                  remote, even if a local cached copy is available.
                  """),
        Parameter('chunk_size',
                  kind=int,
                  default=1024,
                  description="""
                  Chunk size for streaming large assets.
                  """),
    ]

    def __init__(self, **kwargs):
        super(Http, self).__init__(**kwargs)
        self.logger = logger
        self.index = {}

    def register(self, resolver):
        resolver.register(self.get, SourcePriority.remote)

    def get(self, resource):
        if not resource.owner:
            return  # TODO: add support for unowned resources
        if not self.index:
            try:
                self.index = self.fetch_index()
            except requests.exceptions.RequestException as e:
                msg = 'Skipping HTTP getter due to connection error: {}'
                self.logger.debug(msg.format(str(e)))
                return
        if resource.kind == 'apk':
            # APKs must always be downloaded to run ApkInfo for version
            # information.
            return self.resolve_apk(resource)
        else:
            asset = self.resolve_resource(resource)
            if not asset:
                return
            return self.download_asset(asset, resource.owner.name)

    def fetch_index(self):
        if not self.url:
            return {}
        index_url = urljoin(self.url, 'index.json')
        response = self.geturl(index_url)
        if response.status_code != http.client.OK:
            message = 'Could not fetch "{}"; received "{} {}"'
            self.logger.error(
                message.format(index_url, response.status_code,
                               response.reason))
            return {}
        if sys.version_info[0] == 3:
            content = response.content.decode('utf-8')
        else:
            content = response.content
        return json.loads(content)

    def download_asset(self, asset, owner_name):
        url = urljoin(self.url, owner_name, asset['path'])
        local_path = _f(
            os.path.join(settings.dependencies_directory, '__remote',
                         owner_name, asset['path'].replace('/', os.sep)))
        if os.path.exists(local_path) and not self.always_fetch:
            local_sha = sha256(local_path)
            if local_sha == asset['sha256']:
                self.logger.debug('Local SHA256 matches; not re-downloading')
                return local_path
        self.logger.debug('Downloading {}'.format(url))
        response = self.geturl(url, stream=True)
        if response.status_code != http.client.OK:
            message = 'Could not download asset "{}"; received "{} {}"'
            self.logger.warning(
                message.format(url, response.status_code, response.reason))
            return
        with open(local_path, 'wb') as wfh:
            for chunk in response.iter_content(chunk_size=self.chunk_size):
                wfh.write(chunk)
        return local_path

    def geturl(self, url, stream=False):
        if self.username:
            auth = (self.username, self.password)
        else:
            auth = None
        return requests.get(url, auth=auth, stream=stream)

    def resolve_apk(self, resource):
        assets = self.index.get(resource.owner.name, {})
        if not assets:
            return None
        asset_map = {a['path']: a for a in assets}
        paths = get_path_matches(resource, list(asset_map.keys()))
        local_paths = []
        for path in paths:
            local_paths.append(
                self.download_asset(asset_map[path], resource.owner.name))
        for path in local_paths:
            if resource.match(path):
                return path

    def resolve_resource(self, resource):
        # pylint: disable=too-many-branches,too-many-locals
        assets = self.index.get(resource.owner.name, {})
        if not assets:
            return {}

        asset_map = {a['path']: a for a in assets}
        if resource.kind in ['jar', 'revent']:
            path = get_generic_resource(resource, list(asset_map.keys()))
            if path:
                return asset_map[path]
        elif resource.kind == 'executable':
            path = '/'.join(['bin', resource.abi, resource.filename])
            for asset in assets:
                if asset['path'].lower() == path.lower():
                    return asset
        else:  # file
            for asset in assets:
                if asset['path'].lower() == resource.path.lower():
                    return asset
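
The index-based resolution described in the ``Http`` getter's docstring boils
down to: fetch ``<URL>/index.json``, look up the asset list for the owning
extension, compare the advertised SHA256 against any locally cached copy, and
only download when they differ. The sketch below is illustrative only; the
``fetch_asset`` helper, URL and cache directory are placeholders rather than
part of WA's API:

import hashlib
import json
import os

import requests


def sha256(path, chunk_size=1024):
    # Hash a local file in chunks (plays the role of WA's sha256() helper).
    digest = hashlib.sha256()
    with open(path, 'rb') as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()


def fetch_asset(base_url, owner, filename, cache_dir):
    # Illustrative helper: resolve one asset from the index and download it
    # only if there is no cached copy with a matching hash.
    index = json.loads(requests.get(base_url + '/index.json').content.decode('utf-8'))
    for asset in index.get(owner, []):
        if os.path.basename(asset['path']) != filename:
            continue
        local_path = os.path.join(cache_dir, owner, asset['path'].replace('/', os.sep))
        if os.path.exists(local_path) and sha256(local_path) == asset['sha256']:
            return local_path  # cached copy still matches the advertised hash
        os.makedirs(os.path.dirname(local_path), exist_ok=True)
        response = requests.get('{}/{}/{}'.format(base_url, owner, asset['path']),
                                stream=True)
        with open(local_path, 'wb') as wfh:
            for chunk in response.iter_content(chunk_size=1024):
                wfh.write(chunk)
        return local_path


# Usage with assumed values:
# fetch_asset('http://example.com/assets', 'foo', 'foo-app.apk', '/tmp/wa-assets')
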
Exemplo n.º 28
0
class Googlephotos(ApkUiautoWorkload):

    name = 'googlephotos'
    package_names = ['com.google.android.apps.photos']
    description = '''
    A workload to perform standard productivity tasks with Google Photos. The workload carries out
    various tasks, such as browsing images, performing zooms, and post-processing the image.

    Test description:

    1. Four images are copied to the target
    2. The application is started in offline access mode
    3. Gestures are performed to pinch zoom in and out of the selected image
    4. The colour of a selected image is edited by selecting the colour menu, incrementing the
       colour, resetting the colour and decrementing the colour using the seek bar.
    5. A crop test is performed on a selected image.  UiAutomator does not allow the selection of
       the crop markers so the image is tilted positively, reset and then tilted negatively to get a
       similar cropping effect.
    6. A rotate test is performed on a selected image, rotating anticlockwise 90 degrees, 180
       degrees and 270 degrees.

    Known working APK version: 4.0.0.212659618
    '''

    default_test_images = [
        'uxperf_1200x1600.png',
        'uxperf_1600x1200.jpg',
        'uxperf_2448x3264.png',
        'uxperf_3264x2448.jpg',
    ]

    parameters = [
        Parameter('test_images',
                  kind=list_of_strs,
                  default=default_test_images,
                  constraint=lambda x: len(unique(x)) == 4,
                  description='''
                  A list of four JPEG and/or PNG files to be pushed to the target.
                  Absolute file paths may be used but tilde expansion must be escaped.
                  '''),
    ]

    def __init__(self, target, **kwargs):
        super(Googlephotos, self).__init__(target, **kwargs)
        self.deployable_assets = self.test_images

    def init_resources(self, context):
        super(Googlephotos, self).init_resources(context)
        # Only accept certain image formats
        for image in self.test_images:
            if os.path.splitext(
                    image.lower())[1] not in ['.jpg', '.jpeg', '.png']:
                raise ValidationError(
                    '{} must be a JPEG or PNG file'.format(image))

    def deploy_assets(self, context):
        super(Googlephotos, self).deploy_assets(context)
        # Create a subfolder for each test_image named ``wa-[1-4]``
        # Move each image into its subfolder
        # This is to guarantee ordering and allows the workload to select a specific
        # image by subfolder, as filenames are not shown easily within the app
        d = self.target.working_directory
        e = self.target.external_storage

        file_list = []

        for i, f in enumerate(self.test_images):
            orig_file_path = self.target.path.join(d, f)
            new_dir = self.target.path.join(e, 'wa', 'wa-{}'.format(i + 1))
            new_file_path = self.target.path.join(new_dir, f)

            self.target.execute('mkdir -p {}'.format(new_dir))
            self.target.execute('cp {} {}'.format(orig_file_path,
                                                  new_file_path))
            self.target.execute('rm {}'.format(orig_file_path))
            file_list.append(new_file_path)
        self.deployed_assets = file_list
        # Force rescan
        self.target.refresh_files(self.deployed_assets)

    def remove_assets(self, context):
        for asset in self.deployed_assets:
            self.target.remove(os.path.dirname(asset))
        self.target.refresh_files(self.deployed_assets)
Exemplo n.º 29
0
class SerialMon(Instrument):

    name = 'serialmon'
    description = """
    Records the traffic on a serial connection

    The traffic on a serial connection is monitored and logged to a
    file. In the event that the device is reset, the instrument will
    stop monitoring during the reset, and will reconnect once the
    reset has completed. This is to account for devices (e.g., the
    Juno) which utilise the serial connection to reset the board.
    """

    parameters = [
        Parameter('serial_port',
                  kind=str,
                  default="/dev/ttyS0",
                  description="""
                  The serial device to monitor.
                  """),
        Parameter('baudrate',
                  kind=int,
                  default=115200,
                  description="""
                  The baud-rate to use when connecting to the serial connection.
                  """),
    ]

    def __init__(self, target, **kwargs):
        super(SerialMon, self).__init__(target, **kwargs)
        self._collector = SerialTraceCollector(target, self.serial_port,
                                               self.baudrate)

    def start_logging(self, context, filename="serial.log"):
        outpath = os.path.join(context.output_directory, filename)
        self._collector.set_output(outpath)
        self._collector.reset()
        self.logger.debug("Acquiring serial port ({})".format(
            self.serial_port))
        if self._collector.collecting:
            self.stop_logging(context)
        self._collector.start()

    def stop_logging(self, context, identifier="job"):
        self.logger.debug("Releasing serial port ({})".format(
            self.serial_port))
        if self._collector.collecting:
            self._collector.stop()
            data = self._collector.get_data()
            for l in data:  # noqa: E741
                context.add_artifact("{}_serial_log".format(identifier),
                                     l.path,
                                     kind="log")

    def on_run_start(self, context):
        self.start_logging(context, "preamble_serial.log")

    def before_job_queue_execution(self, context):
        self.stop_logging(context, "preamble")

    def after_job_queue_execution(self, context):
        self.start_logging(context, "postamble_serial.log")

    def on_run_end(self, context):
        self.stop_logging(context, "postamble")

    def on_job_start(self, context):
        self.start_logging(context)

    def on_job_end(self, context):
        self.stop_logging(context)

    @hostside
    def before_reboot(self, context):
        self.stop_logging(context)
Exemplo n.º 30
0
class ManualWorkload(Workload):

    name = 'manual'
    description = """
    Yields control to the user, either for a fixed period or based on user
    input, to perform custom operations on the device that Workload Automation
    does not know about.

    """
    default_duration = 30

    parameters = [
        Parameter('duration', kind=int, default=None,
                  description="""
                  Control of the device is yielded for the specified duration
                  (in seconds). If not specified, ``user_triggered`` is
                  assumed.
                  """),
        Parameter('user_triggered', kind=bool, default=None,
                  description="""
                  If ``True``, WA will wait for user input after starting the
                  workload; otherwise a fixed duration is expected. Defaults to
                  ``True`` if ``duration`` is not specified, and ``False``
                  otherwise.
                  """),
        Parameter('view', default='SurfaceView',
                  description="""
                  Specifies the View of the workload. This enables instruments
                  that require a View to be specified, such as the ``fps``
                  instrument.  This is required for using "SurfaceFlinger" to
                  collect FPS statistics and is primarily used on devices below
                  API level 23.
                  """),
        Parameter('package',
                  description="""
                  Specifies the package name of the workload. This enables
                  instruments that require a Package to be specified, such as
                  the ``fps`` instrument. This allows for "gfxinfo" to be used
                  and is the preferred method of collection for FPS statistics
                  on devices API level 23+.
                  """),
    ]

    def validate(self):
        if self.duration is None:
            if self.user_triggered is None:
                self.user_triggered = True
            elif self.user_triggered is False:
                self.duration = self.default_duration
        if self.user_triggered and self.duration:
            message = 'Manual Workload can either specify duration or be user triggered, but not both'
            raise ConfigError(message)
        if not self.user_triggered and not self.duration:
            raise ConfigError('Either user_triggered must be ``True`` or duration must be > 0.')

    def setup(self, context):
        self.logger.info('Any setup required by your workload should be done now.')
        self.logger.info('As soon as you are done hit any key and wait for the message')
        self.logger.info('"START NOW!" to begin your manual workload.')
        self.logger.info('')
        self.logger.info('hit any key to finalize your setup...')
        getch()

    def run(self, context):
        self.logger.info('START NOW!')
        if self.duration:
            self.target.sleep(self.duration)
        elif self.user_triggered:
            self.logger.info('')
            self.logger.info('hit any key to end your workload execution...')
            getch()
        else:
            raise ConfigError('Illegal parameters for manual workload')
        self.logger.info('DONE!')