def _PrintPerfResults(self):
    if self._use_legacy_method:
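      # Legacy path: derive elapsed seconds and frame count from the timestamp
      # and page-flip counter sampled before and after monitoring.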
      surface_after = self._GetSurfaceStatsLegacy()
      td = surface_after['timestamp'] - self._surface_before['timestamp']
      seconds = td.seconds + td.microseconds / 1e6
      frame_count = (surface_after['page_flip_count'] -
                     self._surface_before['page_flip_count'])
    else:
      assert self._collector_thread
      (seconds, latencies) = self._GetDataFromThread()
      if not seconds or not latencies:
        logging.warning('Surface stat data is empty')
        return

      frame_count = len(latencies)
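      # Count frames whose latency increased relative to the previous frame;
      # jitter_percent below reports this as a fraction of all sampled frames.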
      jitter_count = 0
      last_latency = latencies[0]
      for latency in latencies[1:]:
        if latency > last_latency:
          jitter_count = jitter_count + 1
        last_latency = latency

      perf_tests_helper.PrintPerfResult(
          'surface_latencies', 'surface_latencies' + self._trace_tag,
          latencies, '')
      perf_tests_helper.PrintPerfResult(
          'peak_jitter', 'peak_jitter' + self._trace_tag, [max(latencies)], '')
      perf_tests_helper.PrintPerfResult(
          'jitter_percent', 'jitter_percent' + self._trace_tag,
          [jitter_count * 100.0 / frame_count], 'percent')

    print 'SurfaceMonitorTime: %fsecs' % seconds
    perf_tests_helper.PrintPerfResult(
        'avg_surface_fps', 'avg_surface_fps' + self._trace_tag,
        [int(round(frame_count / seconds))], 'fps')
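For reference, here is a minimal, self-contained sketch of the same jitter and FPS arithmetic used above, with made-up latency samples (the helper name summarize_latencies and the numbers are illustrative only, not part of the original code):

def summarize_latencies(latencies, seconds):
  """Returns (avg_fps, peak_jitter, jitter_percent) for per-frame latencies
  collected over `seconds` of monitoring."""
  frame_count = len(latencies)
  # A "jitter" event is a frame whose latency is higher than the previous one.
  jitter_count = sum(1 for prev, cur in zip(latencies, latencies[1:])
                     if cur > prev)
  avg_fps = int(round(frame_count / float(seconds)))
  return avg_fps, max(latencies), jitter_count * 100.0 / frame_count

# 5 frames over 0.1s -> 50 fps; two latency increases out of 5 frames -> 40%.
print summarize_latencies([8, 9, 7, 10, 7], 0.1)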
Example #2
    def TearDownPerfMonitoring(self, test):
        """Cleans up performance monitoring if the specified test required it.

    Args:
      test: The name of the test that was just run.
    Raises:
      Exception: if there's anything wrong with the perf data.
    """
        if not self._IsPerfTest(test):
            return
        raw_test_name = test.split('#')[1]

        # Wait for and grab the annotation data so we can figure out which
        # traces to parse.
        regex = self.adb.WaitForLogMatch(
            re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\):(.*)'),
            None)

        # If the test is restricted to a specific device type (e.g. tablet-only
        # or phone-only) and is run on the wrong kind of device, it quits
        # without doing anything. The Java test harness still prints the
        # appropriate annotation for us, but adds --NORUN-- so we know to
        # ignore the results. The --NORUN-- tag is managed by
        # MainActivityTestBase.java.
        if regex.group(1) != '--NORUN--':

            # Obtain the relevant perf data.  The data is dumped to a
            # JSON formatted file.
            json_string = self.adb.GetProtectedFileContents(
                '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt'
            )

            if json_string:
                json_string = '\n'.join(json_string)
            else:
                raise Exception('Perf file does not exist or is empty')

            if self.options.save_perf_json:
                json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
                with open(json_local_file, 'w') as f:
                    f.write(json_string)
                logging.info('Saving Perf UI JSON from test ' + test + ' to ' +
                             json_local_file)

            raw_perf_data = regex.group(1).split(';')

            for raw_perf_set in raw_perf_data:
                if raw_perf_set:
                    perf_set = raw_perf_set.split(',')
                    if len(perf_set) != 3:
                        raise Exception(
                            'Unexpected number of tokens in perf annotation '
                            'string: ' + raw_perf_set)

                    # Process the performance data
                    result = json_perf_parser.GetAverageRunInfoFromJSONString(
                        json_string, perf_set[0])
                    perf_tests_helper.PrintPerfResult(perf_set[1], perf_set[2],
                                                      [result['average']],
                                                      result['units'])
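For reference, a short standalone sketch of how one of these perf annotation entries is split (the annotation string below is made up; the meaning of the three tokens is inferred from the GetAverageRunInfoFromJSONString and PrintPerfResult calls above):

annotation = 'timings,TabSwitchGraph,cold_start;memory,MemoryGraph,cold_start'

for raw_perf_set in annotation.split(';'):
    if raw_perf_set:
        json_key, measurement, trace = raw_perf_set.split(',')
        # json_key names the entry looked up in the PerfTestData.txt JSON dump;
        # measurement and trace become the first two PrintPerfResult arguments.
        print json_key, measurement, trace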
Example #3
def main():
    parser = optparse.OptionParser()
    parser.add_option('--out-dir',
                      help='Directory where the device path is stored',
                      default=os.path.join(constants.DIR_SOURCE_ROOT, 'out'))
    parser.add_option('--no-provisioning-check',
                      action='store_true',
                      help='Will not check if devices are provisioned properly.')
    parser.add_option('--device-status-dashboard',
                      action='store_true',
                      help='Output device status data for dashboard.')
    options, args = parser.parse_args()
    if args:
        parser.error('Unknown options %s' % args)
    devices = android_commands.GetAttachedDevices()
    # TODO(navabi): Test to make sure this fails and then fix call
    offline_devices = android_commands.GetAttachedDevices(hardware=False,
                                                          emulator=False,
                                                          offline=True)

    types, builds, batteries, reports, errors = [], [], [], [], []
    fail_step_lst = []
    if devices:
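        # zip(*...) transposes the per-device tuples returned by DeviceInfo
        # into parallel sequences: all types, all builds, all batteries, etc.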
        types, builds, batteries, reports, errors, fail_step_lst = (zip(
            *[DeviceInfo(dev, options) for dev in devices]))

    err_msg = CheckForMissingDevices(options, devices) or []

    unique_types = list(set(types))
    unique_builds = list(set(builds))

    bb_annotations.PrintMsg('Online devices: %d. Device types %s, builds %s' %
                            (len(devices), unique_types, unique_builds))
    print '\n'.join(reports)

    for serial, dev_errors in zip(devices, errors):
        if dev_errors:
            err_msg += ['%s errors:' % serial]
            err_msg += ['    %s' % error for error in dev_errors]

    if err_msg:
        bb_annotations.PrintWarning()
        msg = '\n'.join(err_msg)
        print msg
        SendDeviceStatusAlert(msg)

    if options.device_status_dashboard:
        perf_tests_helper.PrintPerfResult('BotDevices', 'OnlineDevices',
                                          [len(devices)], 'devices')
        perf_tests_helper.PrintPerfResult('BotDevices', 'OfflineDevices',
                                          [len(offline_devices)], 'devices',
                                          'unimportant')
        for serial, battery in zip(devices, batteries):
            perf_tests_helper.PrintPerfResult('DeviceBattery', serial,
                                              [battery], '%', 'unimportant')

    if False in fail_step_lst:
        # TODO(navabi): The build fails the device status check step if any
        # device has a critically low battery or a slow install speed. Remove
        # those devices from testing so the build can continue with the good
        # devices.
        return 1

    if not devices:
        return 1
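The --device-status-dashboard option above is used as a boolean flag (if options.device_status_dashboard:), which is the behaviour optparse's action='store_true' provides; a minimal standalone illustration with a hypothetical flag name:

import optparse

parser = optparse.OptionParser()
# With store_true the flag takes no argument: it is False by default and
# becomes True when present on the command line.
parser.add_option('--enable-dashboard', action='store_true', default=False,
                  help='Hypothetical boolean flag.')
options, _ = parser.parse_args(['--enable-dashboard'])
assert options.enable_dashboard is True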