Code Example #1
  def PrintSummary(self, trace_tag):
    if self.page_failures:
      return
    for measurement_units_type, values in sorted(
        self.results_summary.iteritems()):
      measurement, units, data_type = measurement_units_type
      if data_type == 'histogram':
        # For histograms, the _by_url data is important.
        by_url_data_type = 'histogram'
      else:
        # For non-histograms, the _by_url data is unimportant.
        by_url_data_type = 'unimportant'
      if '.' in measurement:
        measurement, trace = measurement.split('.', 1)
        trace += (trace_tag or '')
      else:
        trace = measurement + (trace_tag or '')
      if len(self.urls) > 1 and not trace_tag:
        print
        assert len(self.urls) == len(values)
        for i, value in enumerate(values):
          PrintPerfResult(measurement + '_by_url', self.urls[i], [value], units,
                          by_url_data_type)
      # For histograms, we don't print the average data, only the _by_url.
      if data_type != 'histogram':
        PrintPerfResult(measurement, trace, values, units, data_type)
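The example above hands everything to PrintPerfResult, a helper from Chromium's Android test harness. Below is a minimal, hypothetical sketch of the same flow: a stand-in stub for PrintPerfResult plus made-up results_summary data, only to illustrate the shape of the (measurement, units, data_type) keys and the per-URL loop. The stub's signature and its RESULT output line are assumptions, not the harness's actual implementation, and the sketch uses Python 3 (items() instead of iteritems()).

# Hypothetical stand-in for the harness's PrintPerfResult helper (assumed
# signature and output format; the real helper lives in Chromium's pylib).
def PrintPerfResult(measurement, trace, values, units, data_type='default'):
  value_str = str(values[0]) if len(values) == 1 else str(values)
  print('RESULT %s: %s= %s %s [%s]' % (measurement, trace, value_str, units, data_type))

# Made-up data in the shape PrintSummary walks over: results_summary maps a
# (measurement, units, data_type) tuple to a list of per-URL values.
results_summary = {
    ('page_load_time', 'ms', 'unimportant'): [231, 205, 198],
    ('browser_memory', 'kb', 'histogram'): [12040, 11876, 12211],
}
urls = ['http://a.test', 'http://b.test', 'http://c.test']

for (measurement, units, data_type), values in sorted(results_summary.items()):
  # Per-URL results first, then the aggregate (skipped for histograms),
  # mirroring the logic in Code Example #1.
  for i, value in enumerate(values):
    PrintPerfResult(measurement + '_by_url', urls[i], [value], units, data_type)
  if data_type != 'histogram':
    PrintPerfResult(measurement, measurement, values, units, data_type)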
Code Example #2
    def TearDownPerfMonitoring(self, test):
        """Cleans up performance monitoring if the specified test required it.

        Args:
          test: The name of the test that was just run.

        Raises:
          FatalTestException: if there's anything wrong with the perf data.
        """
        if not self._IsPerfTest(test):
            return
        raw_test_name = test.split('#')[1]

        # Wait and grab annotation data so we can figure out which traces to parse.
        regex = self.adb.WaitForLogMatch(
            re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\):(.*)'),
            None)

        # If the test is set to run on a specific device type only (e.g. only
        # tablet or phone) and it is being run on the wrong device, the test
        # just quits and does not do anything.  The Java test harness will still
        # print the appropriate annotation for us, but adds --NORUN-- so we know
        # to ignore the results.  The --NORUN-- tag is managed by
        # MainActivityTestBase.java.
        if regex.group(1) != '--NORUN--':

            # Obtain the relevant perf data.  The data is dumped to a
            # JSON-formatted file.
            json_string = self.adb.GetFileContents(
                '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt'
            )

            if json_string:
                json_string = '\n'.join(json_string)
            else:
                raise FatalTestException(
                    'Perf file does not exist or is empty')

            if self.save_perf_json:
                json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
                with open(json_local_file, 'w') as f:
                    f.write(json_string)
                logging.info('Saving Perf UI JSON from test ' + test + ' to ' +
                             json_local_file)

            raw_perf_data = regex.group(1).split(';')

            for raw_perf_set in raw_perf_data:
                if raw_perf_set:
                    perf_set = raw_perf_set.split(',')
                    if len(perf_set) != 3:
                        raise FatalTestException(
                            'Unexpected number of tokens in '
                            'perf annotation string: ' + raw_perf_set)

                    # Process the performance data
                    result = GetAverageRunInfoFromJSONString(
                        json_string, perf_set[0])

                    PrintPerfResult(perf_set[1], perf_set[2],
                                    [result['average']], result['units'])
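To make the annotation parsing above easier to follow, here is a small self-contained sketch of the same splitting logic applied to a made-up PERFANNOTATION logcat line. The sample line, the test name, and the interpretation of the three comma-separated tokens (JSON key, graph name, trace name, matching how perf_set[0..2] are used above) are illustrative assumptions, not captured device output.

import re

# Made-up logcat payload of the form the WaitForLogMatch() call above waits for.
sample_line = '**PERFANNOTATION(testPageLoad):page_load,load_time,cold;memory,browser_mem,cold;'
raw_test_name = 'testPageLoad'

match = re.match(r'\*\*PERFANNOTATION\(' + re.escape(raw_test_name) + r'\):(.*)', sample_line)

if match and match.group(1) != '--NORUN--':
  for raw_perf_set in match.group(1).split(';'):
    if not raw_perf_set:
      continue
    perf_set = raw_perf_set.split(',')
    assert len(perf_set) == 3, 'Unexpected number of tokens: ' + raw_perf_set
    json_key, graph_name, trace_name = perf_set
    # json_key selects the series inside the dumped perf data file; graph_name
    # and trace_name become the first two arguments to PrintPerfResult.
    print(json_key, graph_name, trace_name)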
Code Example #3
    def _EndGetIOStats(self, initial_io_stats):
        """Gets I/O statistics after running the test and calculates the I/O delta.

        Args:
          initial_io_stats: I/O stats object obtained from _BeginGetIOStats.

        Returns:
          A string of formatted disk I/O statistics.
        """
        disk_io = ''
        if self.performance_test and initial_io_stats:
            final_io_stats = self.adb.GetIoStats()
            for stat in final_io_stats:
                disk_io += '\n' + PrintPerfResult(
                    stat, stat,
                    [final_io_stats[stat] - initial_io_stats[stat]],
                    stat.split('_')[1], True, False)
            logging.info(disk_io)
        return disk_io
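A quick illustration of the delta computation above, using made-up before/after snapshots in the dict-of-counters shape that adb.GetIoStats() is assumed to return. The stat names, and therefore the units pulled out by stat.split('_')[1], are hypothetical.

# Hypothetical I/O counter snapshots; the names encode their units after the
# first underscore, which is what stat.split('_')[1] relies on.
initial_io_stats = {'num_reads': 1200, 'num_writes': 340, 'read_ms': 5100, 'write_ms': 900}
final_io_stats = {'num_reads': 1450, 'num_writes': 410, 'read_ms': 6000, 'write_ms': 1150}

for stat, final_value in sorted(final_io_stats.items()):
  delta = final_value - initial_io_stats[stat]
  units = stat.split('_')[1]
  print('%s: %d %s' % (stat, delta, units))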
Code Example #4
  def PrintSummary(self, trace_tag):
    for measurement_units_type, values in sorted(
        self.results_summary.iteritems()):
      measurement, units, data_type = measurement_units_type
      trace = measurement + (trace_tag or '')
      PrintPerfResult(measurement, trace, values, units, data_type)
Code Example #5
  def PrintSummary(self, trace_tag):
    for measurement_units, values in self.results_summary.iteritems():
      measurement, units = measurement_units
      trace = measurement + (trace_tag or '')
      PrintPerfResult(measurement, trace, values, units)
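Examples #1, #4 and #5 are progressively simpler variants of the same PrintSummary idea; the last one only needs a results_summary keyed by (measurement, units) pairs. The driver below is a hypothetical, self-contained way to exercise that variant: the FakeResults class, the sample data, and the one-line PrintPerfResult stub are assumptions for illustration only, and the sketch uses Python 3 (items() instead of iteritems()).

# Minimal assumed stand-in for the harness helper, just enough to run the sketch.
def PrintPerfResult(measurement, trace, values, units):
  print('RESULT %s: %s= %s %s' % (measurement, trace, values, units))

class FakeResults(object):
  """Holds a results_summary keyed by (measurement, units), as in Example #5."""

  def __init__(self, results_summary):
    self.results_summary = results_summary

  def PrintSummary(self, trace_tag):
    for (measurement, units), values in sorted(self.results_summary.items()):
      trace = measurement + (trace_tag or '')
      PrintPerfResult(measurement, trace, values, units)

FakeResults({('page_load_time', 'ms'): [231, 205, 198]}).PrintSummary('_cold')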