def ValidateAndMeasurePage(self, page, tab, results):
        get_results_js = """
        (function() {
          for (var i = 0; i < __results.length; i++) {
            // indexOf(...) === 0 means the entry starts with the prefix.
            if (__results[i].indexOf('Raw results: ') === 0) return __results[i];
          }
          return null;
        })();
        """

        tab.WaitForDocumentReadyStateToBeComplete()
        tab.EvaluateJavaScript('JetStream.start()')
        tab.WaitForJavaScriptExpression(get_results_js, 600)

        result = tab.EvaluateJavaScript(get_results_js)
        result = json.loads(result.partition(': ')[2])

        all_score_lists = []
        for k, v in result.iteritems():
            results.AddValue(
                list_of_scalar_values.ListOfScalarValues(results.current_page,
                                                         k.replace('.', '_'),
                                                         'score',
                                                         v['result'],
                                                         important=False))
            # Collect all test scores to compute geometric mean.
            for i, score in enumerate(v['result']):
                if len(all_score_lists) <= i:
                    all_score_lists.append([])
                all_score_lists[i].append(score)
        all_scores = []
        for score_list in all_score_lists:
            all_scores.append(statistics.GeometricMean(score_list))
        results.AddSummaryValue(
            list_of_scalar_values.ListOfScalarValues(None, 'Score', 'score',
                                                     all_scores))
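A minimal sketch of the geometric-mean roll-up the loop above performs, with a hand-rolled stand-in for Telemetry's statistics.GeometricMean (the helper name and data here are illustrative):

import math

def geometric_mean(values):
    # n-th root of the product, computed in log space to avoid overflow.
    return math.exp(sum(math.log(v) for v in values) / len(values))

# Column-wise aggregation: the i-th score of every test feeds the i-th
# summary value, mirroring the all_score_lists bookkeeping above.
score_lists = [[10.0, 20.0], [40.0, 80.0]]
print([geometric_mean(col) for col in zip(*score_lists)])  # [20.0, 40.0]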
Example No. 2
 def AddOneResult(metric, unit):
   metrics = media_metric['metrics']
   for m in metrics:
     if m.startswith(metric):
       special_label = m[len(metric):]
       trace_name = '%s.%s%s' % (metric, trace, special_label)
       if isinstance(metrics[m], list):
         results.AddValue(list_of_scalar_values.ListOfScalarValues(
             results.current_page, trace_name, unit,
             values=[float(v) for v in metrics[m]],
             important=True))
       else:
         results.AddValue(scalar.ScalarValue(
             results.current_page, trace_name, unit, value=float(metrics[m]),
             important=True))
Example No. 3
    def ParseTestResults(self, action_runner):
        self.AddJavascriptMetricValue(
            list_of_scalar_values.ListOfScalarValues(
                self,
                'Total',
                'ms',
                action_runner.EvaluateJavaScript(
                    'benchmarkClient._timeValues'),
                important=True))
        self.AddJavascriptMetricValue(
            list_of_scalar_values.ListOfScalarValues(
                self,
                'RunsPerMinute',
                'score',
                action_runner.EvaluateJavaScript(
                    '[parseFloat(document.getElementById("result-number").innerText)];'
                ),
                important=True))

        # Extract the timings for each suite
        for suite_name in self.enabled_suites:
            self.AddJavascriptMetricValue(
                list_of_scalar_values.ListOfScalarValues(
                    self,
                    suite_name,
                    'ms',
                    action_runner.EvaluateJavaScript("""
              var suite_times = [];
              for(var i = 0; i < benchmarkClient.iterationCount; i++) {
                suite_times.push(
                    benchmarkClient._measuredValues[i].tests[{{ key }}].total);
              };
              suite_times;
              """,
                                                     key=suite_name),
                    important=False))
Example No. 4
    def ParseTestResults(self, action_runner):
        if not self._should_filter_suites:
            self.AddJavascriptMetricValue(
                list_of_scalar_values.ListOfScalarValues(
                    self,
                    'Total',
                    'ms',
                    action_runner.EvaluateJavaScript(
                        'suiteValues.map(each => each.total)'),
                    important=True))
            self.AddJavascriptMetricValue(
                list_of_scalar_values.ListOfScalarValues(
                    self,
                    'RunsPerMinute',
                    'score',
                    action_runner.EvaluateJavaScript(
                        'suiteValues.map(each => each.score)'),
                    important=True))

        # Extract the timings for each suite
        for suite_name in self._enabled_suites:
            self.AddJavascriptMetricValue(
                list_of_scalar_values.ListOfScalarValues(
                    self,
                    suite_name,
                    'ms',
                    action_runner.EvaluateJavaScript("""
              var suite_times = [];
              for(var i = 0; i < iterationCount; i++) {
                suite_times.push(
                    suiteValues[i].tests[{{ key }}].total);
              };
              suite_times;
              """,
                                                     key=suite_name),
                    important=False))
Example No. 5
 def _ComputeLatencyMetric(self, page, stats, name, list_of_latency_lists):
   """Returns Values for given latency stats."""
   none_value_reason = None
   latency_list = None
   if self._HasEnoughFrames(stats.frame_timestamps):
     latency_list = perf_tests_helper.FlattenList(list_of_latency_lists)
     if len(latency_list) == 0:
       return None
   else:
     none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
   return list_of_scalar_values.ListOfScalarValues(
       page, name, 'ms', latency_list,
       description='Raw %s values' % name,
       none_value_reason=none_value_reason,
       improvement_direction=improvement_direction.DOWN)
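perf_tests_helper.FlattenList collapses the per-interaction latency lists into one flat list before measurement; a minimal stand-in, assuming exactly one level of nesting:

def flatten(list_of_lists):
    # [[1.5, 2.0], [3.25]] -> [1.5, 2.0, 3.25]
    return [x for sub in list_of_lists for x in sub]

print(flatten([[1.5, 2.0], [3.25]]))  # [1.5, 2.0, 3.25]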
Example No. 6
  def testBasicSummaryWithOnlyOnePage(self):
    page0 = self.pages[0]

    results = self.getPageTestResults()

    results.WillRunPage(page0)
    v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
                            improvement_direction=improvement_direction.UP)
    results.AddValue(v0)
    results.DidRunPage(page0)

    summary = summary_module.Summary(results)
    values = summary.interleaved_computed_per_page_values_and_summaries

    v0_list = list_of_scalar_values.ListOfScalarValues(
        page0, 'a', 'seconds', [3],
        improvement_direction=improvement_direction.UP)
    merged_list = list_of_scalar_values.ListOfScalarValues(
        None, 'a', 'seconds', [3],
        improvement_direction=improvement_direction.UP)

    self.assertEquals(2, len(values))
    self.assertIn(v0_list, values)
    self.assertIn(merged_list, values)
Example No. 7
  def testBasicSummary(self):
    page0 = self.pages[0]
    page1 = self.pages[1]

    results = self.getPageTestResults()

    results.WillRunPage(page0)
    v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
                            improvement_direction=improvement_direction.UP)
    results.AddValue(v0)
    results.DidRunPage(page0)

    results.WillRunPage(page1)
    v1 = scalar.ScalarValue(page1, 'a', 'seconds', 7,
                            improvement_direction=improvement_direction.UP)
    results.AddValue(v1)
    results.DidRunPage(page1)

    summary = summary_module.Summary(results)
    values = summary.interleaved_computed_per_page_values_and_summaries

    v0_list = list_of_scalar_values.ListOfScalarValues(
        page0, 'a', 'seconds', [3],
        improvement_direction=improvement_direction.UP)
    v1_list = list_of_scalar_values.ListOfScalarValues(
        page1, 'a', 'seconds', [7],
        improvement_direction=improvement_direction.UP)
    # Std is 0 because we only have one measurement per page.
    merged_value = list_of_scalar_values.ListOfScalarValues(
        None, 'a', 'seconds', [3, 7], std=0.0,
        improvement_direction=improvement_direction.UP)

    self.assertEquals(3, len(values))
    self.assertIn(v0_list, values)
    self.assertIn(v1_list, values)
    self.assertIn(merged_value, values)
Example No. 8
 def _MergeLikeValues(cls, values, page, name, tir_label):
     v0 = values[0]
     merged_value = [v.value for v in values]
     none_value_reason = None
     if None in merged_value:
         merged_value = None
         none_value_reason = none_values.MERGE_FAILURE_REASON
     return list_of_scalar_values.ListOfScalarValues(
         page,
         name,
         v0.units,
         merged_value,
         important=v0.important,
         tir_label=tir_label,
         none_value_reason=none_value_reason)
Example No. 9
 def _AddWriteResultsInternal(self, events, interactions, results):
     writes = []
     for event in events:
         if (self.IsWriteEvent(event) and any(
                 self.IsEventInInteraction(event, interaction)
                 for interaction in interactions)):
             writes.append(self.ThreadDurationIfPresent(event))
     if writes:
         results.AddValue(
             list_of_scalar_values.ListOfScalarValues(
                 page=results.current_page,
                 name='blob-writes',
                 units='ms',
                 values=writes,
                 description='List of durations of blob writes.'))
     else:
         results.AddValue(
             list_of_scalar_values.ListOfScalarValues(
                 page=results.current_page,
                 name='blob-writes',
                 units='ms',
                 values=None,
                 none_value_reason=
                 'No blob write events found for this interaction.'))
Example No. 10
    def testListWithNoneValueMerging(self):
        page0 = self.pages[0]
        v0 = list_of_scalar_values.ListOfScalarValues(
            page0,
            'x',
            'unit', [1, 2],
            same_page_merge_policy=value.CONCATENATE,
            improvement_direction=improvement_direction.UP)
        v1 = list_of_scalar_values.ListOfScalarValues(
            page0,
            'x',
            'unit',
            None,
            same_page_merge_policy=value.CONCATENATE,
            none_value_reason='n',
            improvement_direction=improvement_direction.UP)
        self.assertTrue(v1.IsMergableWith(v0))

        vM = (list_of_scalar_values.ListOfScalarValues.
              MergeLikeValuesFromSamePage([v0, v1]))
        self.assertEquals(None, vM.values)
        self.assertEquals(none_values.MERGE_FAILURE_REASON,
                          vM.none_value_reason)
        self.assertEquals(improvement_direction.UP, vM.improvement_direction)
Example No. 11
    def testListSamePageMergingWithSamePageConcatenatePolicy(self):
        page0 = self.pages[0]
        v0 = list_of_scalar_values.ListOfScalarValues(
            page0,
            'x',
            'unit', [10, 9, 9, 7],
            same_page_merge_policy=value.CONCATENATE)
        v1 = list_of_scalar_values.ListOfScalarValues(
            page0,
            'x',
            'unit', [300, 302, 303, 304],
            same_page_merge_policy=value.CONCATENATE)
        self.assertTrue(v1.IsMergableWith(v0))

        vM = (list_of_scalar_values.ListOfScalarValues.
              MergeLikeValuesFromSamePage([v0, v1]))
        self.assertEquals(page0, vM.page)
        self.assertEquals('x', vM.name)
        self.assertEquals('unit', vM.units)
        self.assertEquals(value.CONCATENATE, vM.same_page_merge_policy)
        self.assertEquals(True, vM.important)
        self.assertEquals([10, 9, 9, 7, 300, 302, 303, 304], vM.values)
        # SQRT((19/12 * 3 + 35/12 * 3)/6) = 1.5
        self.assertAlmostEqual(1.5, vM.std)
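The 1.5 in the comment can be reproduced by hand: merging concatenates the values but pools the per-list sample variances. A quick standalone sketch (same values as above):

import math

def sample_variance(xs):
    mean = sum(xs) / float(len(xs))
    return sum((x - mean) ** 2 for x in xs) / (len(xs) - 1)

a = [10, 9, 9, 7]          # sample variance 19/12
b = [300, 302, 303, 304]   # sample variance 35/12
# Pooled variance: ((n1-1)*s1 + (n2-1)*s2) / (n1 + n2 - 2)
pooled = (3 * sample_variance(a) + 3 * sample_variance(b)) / 6
print(math.sqrt(pooled))  # 1.5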
Example No. 12
  def testPagesetRepeat(self):
    story_set = story_module.StorySet()

    # TODO(eakuefner): Factor this out after flattening page ref in Value
    blank_story = DummyLocalStory(TestSharedPageState, name='blank')
    green_story = DummyLocalStory(TestSharedPageState, name='green')
    story_set.AddStory(blank_story)
    story_set.AddStory(green_story)

    self.options.pageset_repeat = 2
    self.options.output_formats = []
    results = results_options.CreateResults(
        EmptyMetadataForTest(), self.options)
    story_runner.Run(
        _Measurement(), story_set, self.options, results,
        metadata=EmptyMetadataForTest())
    summary = summary_module.Summary(results.all_page_specific_values)
    values = summary.interleaved_computed_per_page_values_and_summaries

    blank_value = list_of_scalar_values.ListOfScalarValues(
        blank_story, 'metric', 'unit', [1, 3],
        improvement_direction=improvement_direction.UP)
    green_value = list_of_scalar_values.ListOfScalarValues(
        green_story, 'metric', 'unit', [2, 4],
        improvement_direction=improvement_direction.UP)
    merged_value = list_of_scalar_values.ListOfScalarValues(
        None, 'metric', 'unit',
        [1, 3, 2, 4], std=math.sqrt(2),  # Pooled standard deviation.
        improvement_direction=improvement_direction.UP)

    self.assertEquals(4, GetNumberOfSuccessfulPageRuns(results))
    self.assertEquals(0, len(results.failures))
    self.assertEquals(3, len(values))
    self.assertIn(blank_value, values)
    self.assertIn(green_value, values)
    self.assertIn(merged_value, values)
Example No. 13
    def ValidateAndMeasurePage(self, page, tab, results):
        trace_cpu_time_metrics = {}
        if tab.EvaluateJavaScript('testRunner.tracingCategories'):
            trace_data = (
                tab.browser.platform.tracing_controller.StopTracing()[0])
            # TODO(#763375): Rely on results.telemetry_info.trace_local_path/etc.
            kwargs = {}
            if hasattr(results.telemetry_info, 'trace_local_path'):
                kwargs['file_path'] = results.telemetry_info.trace_local_path
                kwargs['remote_path'] = (
                    results.telemetry_info.trace_remote_path)
                kwargs['upload_bucket'] = results.telemetry_info.upload_bucket
                kwargs['cloud_url'] = results.telemetry_info.trace_remote_url
            trace_value = trace.TraceValue(page, trace_data, **kwargs)
            results.AddValue(trace_value)

            trace_events_to_measure = tab.EvaluateJavaScript(
                'window.testRunner.traceEventsToMeasure')
            model = model_module.TimelineModel(trace_data)
            renderer_thread = model.GetRendererThreadFromTabId(tab.id)
            trace_cpu_time_metrics = _ComputeTraceEventsThreadTimeForBlinkPerf(
                model, renderer_thread, trace_events_to_measure)

        log = tab.EvaluateJavaScript(
            'document.getElementById("log").innerHTML')

        for line in log.splitlines():
            if line.startswith("FATAL: "):
                print line
                continue
            if not line.startswith('values '):
                continue
            parts = line.split()
            values = [float(v.replace(',', '')) for v in parts[1:-1]]
            units = parts[-1]
            metric = page.name.split('.')[0].replace('/', '_')
            if values:
                results.AddValue(
                    list_of_scalar_values.ListOfScalarValues(
                        results.current_page, metric, units, values))
            else:
                raise legacy_page_test.MeasurementFailure('Empty test results')

            break

        print log

        self.PrintAndCollectTraceEventMetrics(trace_cpu_time_metrics, results)
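The scraping loop above expects log lines of the form `values <num> <num> ... <unit>`, where the numbers may carry thousands separators; a standalone sketch of that parser (the function name is illustrative):

def parse_values_line(line):
    # "values 1,234.5 6.7 ms" -> ([1234.5, 6.7], 'ms')
    parts = line.split()
    assert parts[0] == 'values' and len(parts) >= 3
    return [float(v.replace(',', '')) for v in parts[1:-1]], parts[-1]

print(parse_values_line('values 1,234.5 6.7 ms'))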
Example No. 14
    def ValidateAndMeasurePage(self, page, tab, results):
        del page  # unused
        tab.WaitForJavaScriptCondition(
            '!document.getElementById("start-performance-tests").disabled',
            timeout=60)

        tab.ExecuteJavaScript("""
        window.__results = {};
        window.console.log = function(str) {
            if (!str) return;
            var key_val = str.split(': ');
            if (key_val.length !== 2) return;  // accept only "key: value" pairs
            __results[key_val[0]] = key_val[1];
        };
        document.getElementById('start-performance-tests').click();
        """)

        num_results = 0
        num_tests_in_spaceport = 24
        while num_results < num_tests_in_spaceport:
            tab.WaitForJavaScriptCondition(
                'Object.keys(window.__results).length > {{ num_results }}',
                num_results=num_results,
                timeout=180)
            num_results = tab.EvaluateJavaScript(
                'Object.keys(window.__results).length')
            logging.info('Completed test %d of %d', num_results,
                         num_tests_in_spaceport)

        result_dict = json.loads(
            tab.EvaluateJavaScript('JSON.stringify(window.__results)'))
        for key in result_dict:
            chart, trace = key.split('.', 1)
            results.AddValue(
                scalar.ScalarValue(results.current_page,
                                   '%s.%s' % (chart, trace),
                                   'objects (bigger is better)',
                                   float(result_dict[key]),
                                   important=False,
                                   description=DESCRIPTIONS.get(chart)))
        results.AddValue(
            list_of_scalar_values.ListOfScalarValues(
                results.current_page,
                'Score',
                'objects (bigger is better)',
                [float(x) for x in result_dict.values()],
                description=
                'Combined score for all parts of the spaceport benchmark.'))
Example No. 15
 def PrintAndCollectTraceEventMetrics(self, trace_cpu_time_metrics, results):
   unit = 'ms'
   print
   for trace_event_name, cpu_times in trace_cpu_time_metrics.iteritems():
     print 'CPU times of trace event "%s":' % trace_event_name
     cpu_times_string = ', '.join(['{0:.10f}'.format(t) for t in cpu_times])
     print 'values %s %s' % (cpu_times_string, unit)
     avg = 0.0
     if cpu_times:
       avg = sum(cpu_times)/len(cpu_times)
     print 'avg', '{0:.10f}'.format(avg), unit
     results.AddValue(list_of_scalar_values.ListOfScalarValues(
         results.current_page, name=trace_event_name, units=unit,
         values=cpu_times))
     print
   print '\n'
Example No. 16
  def ValidateAndMeasurePage(self, page, tab, results):
    media_metric = tab.EvaluateJavaScript('window.__testMetrics')
    trace = media_metric['id'] if 'id' in media_metric else None
    metrics = media_metric['metrics'] if 'metrics' in media_metric else []
    for m in metrics:
      trace_name = '%s.%s' % (m, trace)
      if isinstance(metrics[m], list):
        results.AddValue(list_of_scalar_values.ListOfScalarValues(
                results.current_page, trace_name, units='ms',
                values=[float(v) for v in metrics[m]],
                important=True))

      else:
        results.AddValue(scalar.ScalarValue(
                results.current_page, trace_name, units='ms',
                value=float(metrics[m]), important=True))
Example No. 17
 def _AddResultsInternal(self, events, interactions, results):
   layouts = []
   for event in events:
     if (event.name == self.EVENT_NAME) and any(
             interaction.start <= event.start <= interaction.end
             for interaction in interactions):
       layouts.append(event.end - event.start)
   if not layouts:
     return
   results.AddValue(list_of_scalar_values.ListOfScalarValues(
     page=results.current_page,
     name='layout',
     units='ms',
     values=layouts,
     description=('List of durations of layouts that were caused by and '
                  'start during interactions')))
Example No. 18
    def testNoneValueAsDict(self):
        v = list_of_scalar_values.ListOfScalarValues(
            None,
            'x',
            'unit',
            None,
            important=False,
            none_value_reason='n',
            improvement_direction=improvement_direction.UP)
        d = v.AsDictWithoutBaseClassEntries()

        self.assertEquals(d, {
            'values': None,
            'none_value_reason': 'n',
            'std': None
        })
Example No. 19
    def testRepr(self):
        page = self.pages[0]
        v = list_of_scalar_values.ListOfScalarValues(
            page,
            'x',
            'unit', [10, 9, 9, 7],
            important=True,
            description='desc',
            std=42,
            improvement_direction=improvement_direction.DOWN)

        expected = ('ListOfScalarValues(http://www.bar.com/, x, unit, '
                    '[10, 9, 9, 7], important=True, description=desc, '
                    'std=42, improvement_direction=down)')

        self.assertEquals(expected, str(v))
Example No. 20
 def _AddJitterResultsInternal(self, events, interactions, results):
     jitters = []
     for event in events:
         if timeline_based_metric.IsEventInInteractions(
                 event, interactions):
             jitters.append(event.args['value'])
     if jitters:
         results.AddValue(
             list_of_scalar_values.ListOfScalarValues(
                 page=results.current_page,
                 tir_label=interactions[0].label,
                 name='jitter-amount',
                 units='score',
                 values=jitters,
                 description='Jitter each frame',
                 improvement_direction=improvement_direction.DOWN))
Example No. 21
 def ReportResultsForProcess(memory_dumps, process_name):
     if not memory_dumps:
         metric_values = dict.fromkeys(DEFAULT_METRICS)
         none_reason = 'No memory dumps with mmaps found within interactions'
     else:
         metric_values = _AggregateDicts(dump.GetMemoryUsage()
                                         for dump in memory_dumps)
         none_reason = None
     for metric, values in metric_values.iteritems():
         results.AddValue(
             list_of_scalar_values.ListOfScalarValues(
                 page=results.current_page,
                 name='memory_%s_%s' % (metric, process_name),
                 units='bytes',
                 values=values,
                 none_value_reason=none_reason))
Example No. 22
    def testNoneValueAsDict(self):
        v = list_of_scalar_values.ListOfScalarValues(
            None,
            'x',
            'unit',
            None,
            same_page_merge_policy=value.PICK_FIRST,
            important=False,
            none_value_reason='n')
        d = v.AsDictWithoutBaseClassEntries()

        self.assertEquals(d, {
            'values': None,
            'none_value_reason': 'n',
            'std': None
        })
Example No. 23
    def testBasicSummaryWithOnlyOnePage(self):
        page0 = self.pages[0]

        results = page_test_results.PageTestResults()

        results.WillRunPage(page0)
        v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3)
        results.AddValue(v0)
        results.DidRunPage(page0)

        summary = summary_module.Summary(results.all_page_specific_values)
        values = summary.interleaved_computed_per_page_values_and_summaries

        v0_list = list_of_scalar_values.ListOfScalarValues(
            None, 'a', 'seconds', [3])

        self.assertEquals([v0_list], values)
Example No. 24
    def _ComputeFrameTimeMetric(self, page, stats):
        """Returns Values for the frame time metrics.

    This includes the raw and mean frame times, as well as the percentage of
    frames that were hitting 60 fps.
    """
        frame_times = None
        mean_frame_time = None
        percentage_smooth = None
        none_value_reason = None
        if self._HasEnoughFrames(stats.frame_timestamps):
            frame_times = perf_tests_helper.FlattenList(stats.frame_times)
            mean_frame_time = round(statistics.ArithmeticMean(frame_times), 3)
            # We use 17ms as a somewhat looser threshold, instead of 1000.0/60.0.
            smooth_threshold = 17.0
            smooth_count = sum(1 for t in frame_times if t < smooth_threshold)
            percentage_smooth = float(smooth_count) / len(frame_times) * 100.0
        else:
            none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
        return (list_of_scalar_values.ListOfScalarValues(
            page,
            'frame_times',
            'ms',
            frame_times,
            description='List of raw frame times, helpful to understand the '
            'other metrics.',
            none_value_reason=none_value_reason,
            improvement_direction=improvement_direction.DOWN),
                scalar.ScalarValue(
                    page,
                    'mean_frame_time',
                    'ms',
                    mean_frame_time,
                    description='Arithmetic mean of frame times.',
                    none_value_reason=none_value_reason,
                    improvement_direction=improvement_direction.DOWN),
                scalar.ScalarValue(
                    page,
                    'percentage_smooth',
                    'score',
                    percentage_smooth,
                    description=
                    'Percentage of frames that were hitting 60 fps.',
                    none_value_reason=none_value_reason,
                    improvement_direction=improvement_direction.UP))
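The percentage_smooth computation is easy to sanity-check in isolation; a sketch with assumed frame times (17 ms is the deliberately loosened 60 fps cutoff, versus 1000.0/60.0 ≈ 16.7 ms):

frame_times = [14.0, 16.0, 18.0, 33.0]  # illustrative values, in ms
smooth_threshold = 17.0                 # loosened from 1000.0 / 60.0
smooth_count = sum(1 for t in frame_times if t < smooth_threshold)
print(float(smooth_count) / len(frame_times) * 100.0)  # 50.0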
Example No. 25
  def _MergeLikeValues(cls, values, page, name, grouping_keys):
    v0 = values[0]

    merged_value = [v.value for v in values]
    none_value_reason = None
    if None in merged_value:
      merged_value = None
      merged_none_values = [v for v in values if v.value is None]
      none_value_reason = (
          none_values.MERGE_FAILURE_REASON +
          ' None values: %s' % repr(merged_none_values))
    return list_of_scalar_values.ListOfScalarValues(
        page, name, v0.units, merged_value, important=v0.important,
        description=v0.description,
        tir_label=value_module.MergedTirLabel(values),
        none_value_reason=none_value_reason,
        improvement_direction=v0.improvement_direction,
        grouping_keys=grouping_keys)
Example No. 26
  def ValidateAndMeasurePage(self, page, tab, results):
    tab.WaitForJavaScriptExpression('testRunner.isDone', 600)

    log = tab.EvaluateJavaScript('document.getElementById("log").innerHTML')

    for line in log.splitlines():
      if not line.startswith('values '):
        continue
      parts = line.split()
      values = [float(v.replace(',', '')) for v in parts[1:-1]]
      units = parts[-1]
      metric = page.display_name.split('.')[0].replace('/', '_')
      results.AddValue(list_of_scalar_values.ListOfScalarValues(
          results.current_page, metric, units, values))

      break

    print log
Example No. 27
 def _MergeLikeValues(cls, values, page, name, tir_label, grouping_keys):
     v0 = values[0]
     merged_value = [v.value for v in values]
     none_value_reason = None
     if None in merged_value:
         merged_value = None
         none_value_reason = none_values.MERGE_FAILURE_REASON
     return list_of_scalar_values.ListOfScalarValues(
         page,
         name,
         v0.units,
         merged_value,
         important=v0.important,
         description=v0.description,
         tir_label=tir_label,
         none_value_reason=none_value_reason,
         improvement_direction=v0.improvement_direction,
         grouping_keys=grouping_keys)
Example No. 28
 def _AddResultsInternal(self, events, interactions, results):
   events_found = []
   for event in events:
     if (event.name == self._TRACE_EVENT_NAME) and any(
             interaction.start <= event.start <= interaction.end
             for interaction in interactions):
       if event.has_thread_timestamps:
         events_found.append(event.thread_duration)
       else:
         events_found.append(event.duration)
   if not events_found:
     return
   results.AddValue(list_of_scalar_values.ListOfScalarValues(
     page=results.current_page,
     name=self._metric_name,
     units='ms',
     values=events_found,
     description=self._metric_description))
Example No. 29
class GPUTimelineMetric(timeline_based_metric.TimelineBasedMetric):
  """Computes GPU based metrics."""

  def __init__(self):
    super(GPUTimelineMetric, self).__init__()

  def AddResults(self, model, _, interaction_records, results):
    self.VerifyNonOverlappedRecords(interaction_records)
    service_times = self._CalculateGPUTimelineData(model)
    for value_item, durations in service_times.iteritems():
      count = len(durations)
      avg = 0.0
      stddev = 0.0
      maximum = 0.0
      if count:
        avg = sum(durations) / count
        stddev = math.sqrt(sum((d - avg) ** 2 for d in durations) / count)
        maximum = max(durations)

      name, src = value_item

      if src:
        frame_times_name = '%s_%s_frame_times' % (name, src)
      else:
        frame_times_name = '%s_frame_times' % (name)

      if durations:
        results.AddValue(list_of_scalar_values.ListOfScalarValues(
            results.current_page, frame_times_name, 'ms', durations,
            tir_label=interaction_records[0].label,
            improvement_direction=improvement_direction.DOWN))

      results.AddValue(scalar.ScalarValue(
          results.current_page, TimelineName(name, src, 'max'), 'ms', maximum,
          tir_label=interaction_records[0].label,
          improvement_direction=improvement_direction.DOWN))
      results.AddValue(scalar.ScalarValue(
          results.current_page, TimelineName(name, src, 'mean'), 'ms', avg,
          tir_label=interaction_records[0].label,
          improvement_direction=improvement_direction.DOWN))
      results.AddValue(scalar.ScalarValue(
          results.current_page, TimelineName(name, src, 'stddev'), 'ms', stddev,
          tir_label=interaction_records[0].label,
          improvement_direction=improvement_direction.DOWN))
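Note that the stddev above divides by the full count, i.e. a population standard deviation, unlike the pooled sample deviation used when merging values. A toy check with assumed durations:

import math

durations = [4.0, 6.0]  # illustrative GPU service times, in ms
avg = sum(durations) / len(durations)
stddev = math.sqrt(sum((d - avg) ** 2 for d in durations) / len(durations))
print(avg, stddev, max(durations))  # 5.0 1.0 6.0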
Example No. 30
    def ValidateAndMeasurePage(self, page, tab, results):
        tab.WaitForJavaScriptCondition(
            'testRunner.isDone || testRunner.isWaitingForTracingStart',
            timeout=600)
        trace_cpu_time_metrics = {}
        if tab.EvaluateJavaScript('testRunner.isWaitingForTracingStart'):
            trace_data = self._ContinueTestRunWithTracing(tab)
            trace_value = trace.TraceValue(page, trace_data)
            results.AddValue(trace_value)

            trace_events_to_measure = tab.EvaluateJavaScript(
                'window.testRunner.traceEventsToMeasure')
            model = model_module.TimelineModel(trace_data)
            renderer_thread = model.GetRendererThreadFromTabId(tab.id)
            trace_cpu_time_metrics = _ComputeTraceEventsThreadTimeForBlinkPerf(
                model, renderer_thread, trace_events_to_measure)

        log = tab.EvaluateJavaScript(
            'document.getElementById("log").innerHTML')

        for line in log.splitlines():
            if line.startswith("FATAL: "):
                print line
                continue
            if not line.startswith('values '):
                continue
            parts = line.split()
            values = [float(v.replace(',', '')) for v in parts[1:-1]]
            units = parts[-1]
            metric = page.name.split('.')[0].replace('/', '_')
            if values:
                results.AddValue(
                    list_of_scalar_values.ListOfScalarValues(
                        results.current_page, metric, units, values))
            else:
                raise legacy_page_test.MeasurementFailure('Empty test results')

            break

        print log

        self.PrintAndCollectTraceEventMetrics(trace_cpu_time_metrics, results)