Example #1
    def AddResultsForSafebrowsing(self, tab, results):
        count = 0
        safebrowsing_count = 0
        for resp in self.IterResponses(tab):
            count += 1
            if resp.IsSafebrowsingResponse():
                safebrowsing_count += 1
            else:
                r = resp.response
                raise ChromeProxyMetricException(
                    '%s: Not a valid safe browsing response.\n'
                    'Response: status=(%d, %s)\nHeaders:\n %s' %
                    (r.url, r.status, r.status_text, r.headers))
        if count == safebrowsing_count:
            results.AddValue(
                scalar.ScalarValue(results.current_page, 'safebrowsing',
                                   'boolean', True))
        else:
            raise ChromeProxyMetricException(
                'Safebrowsing failed (count=%d, safebrowsing_count=%d)\n' %
                (count, safebrowsing_count))
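
The shape of Example #1 — walk every response, count the matching subset, and fail loudly on any mismatch — is worth isolating. A minimal self-contained sketch of that validation pattern (the exception class and predicate here are illustrative stand-ins, not Telemetry APIs):

class MetricException(Exception):
    """Illustrative stand-in for ChromeProxyMetricException."""


def CheckAllMatch(responses, predicate, label):
    # Count every response and the subset satisfying the predicate; the
    # check passes only when the two counts agree.
    count = 0
    matching = 0
    for resp in responses:
        count += 1
        if predicate(resp):
            matching += 1
    if count != matching:
        raise MetricException(
            '%s failed (count=%d, matching=%d)' % (label, count, matching))


# e.g. CheckAllMatch(['a', 'a'], lambda r: r == 'a', 'safebrowsing') passes.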
Example #2
    def ValidateAndMeasurePage(self, page, tab, results):
        tab.WaitForJavaScriptExpression('__pc_load_time', 60)

        chart_name_prefix = ('cold_' if self.IsRunCold(page.url) else 'warm_')

        results.AddValue(
            scalar.ScalarValue(
                results.current_page,
                '%stimes.page_load_time' % chart_name_prefix,
                'ms',
                tab.EvaluateJavaScript('__pc_load_time'),
                description='Average page load time. Measured from '
                'performance.timing.navigationStart until the completion '
                'time of a layout after the window.load event. Cold times '
                'are the times when the page is loaded cold, i.e. without '
                'loading it before, and warm times are times when the '
                'page is loaded after being loaded previously.'))

        self._has_loaded_page[page.url] += 1

        self._power_metric.Stop(page, tab)
        self._memory_metric.Stop(page, tab)
        self._memory_metric.AddResults(tab, results)
        self._power_metric.AddResults(tab, results)

        self._cpu_metric.Stop(page, tab)
        self._cpu_metric.AddResults(tab, results)

        if self._report_speed_index:

            def SpeedIndexIsFinished():
                return self._speedindex_metric.IsFinished(tab)

            util.WaitFor(SpeedIndexIsFinished, 60)
            self._speedindex_metric.Stop(page, tab)
            self._speedindex_metric.AddResults(tab,
                                               results,
                                               chart_name=chart_name_prefix +
                                               'speed_index')
        keychain_metric.KeychainMetric().AddResults(tab, results)
Example #3
    def AddResultsForCorsBypass(self, tab, results):
        eligible_response_count = 0
        bypass_count = 0
        bypasses = {}
        for resp in self.IterResponses(tab):
            logging.warning('got a resource %s', resp.response.url)

        for resp in self.IterResponses(tab):
            if resp.ShouldHaveChromeProxyViaHeader():
                eligible_response_count += 1
                if not resp.HasChromeProxyViaHeader():
                    bypass_count += 1
                elif resp.response.status == 502:
                    bypasses[resp.response.url] = 0

        for resp in self.IterResponses(tab):
            if resp.ShouldHaveChromeProxyViaHeader():
                if not resp.HasChromeProxyViaHeader():
                    if resp.response.status == 200:
                        if resp.response.url in bypasses:
                            bypasses[resp.response.url] += 1

        for url in bypasses:
            if bypasses[url] == 0:
                raise ChromeProxyMetricException(
                    '%s: Got a 502 without a subsequent 200' % url)
            elif bypasses[url] > 1:
                raise ChromeProxyMetricException(
                    '%s: Got a 502 and multiple 200s: %d' %
                    (url, bypasses[url]))
        if bypass_count == 0:
            raise ChromeProxyMetricException(
                'At least one response should be bypassed. '
                '(eligible_response_count=%d, bypass_count=%d)\n' %
                (eligible_response_count, bypass_count))

        results.AddValue(
            scalar.ScalarValue(results.current_page, 'cors_bypass', 'count',
                               bypass_count))
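
The three passes in Example #3 amount to per-URL bookkeeping: a 502 registers the URL with a counter of zero, each later 200 for that URL increments it, and exactly one 200 per 502 is the only passing state. A condensed single-pass sketch of that bookkeeping (assuming responses are seen in order, which the original's separate passes do not require; the two-field tuples stand in for Telemetry response objects):

def CheckBypassThenRetry(responses):
    # Map each URL that returned 502 to the number of 200s seen afterwards.
    retries_after_502 = {}
    for status, url in responses:
        if status == 502:
            retries_after_502[url] = 0
        elif status == 200 and url in retries_after_502:
            retries_after_502[url] += 1
    for url, count in retries_after_502.items():
        if count == 0:
            raise ValueError('%s: Got a 502 without a subsequent 200' % url)
        if count > 1:
            raise ValueError('%s: Got a 502 and multiple 200s: %d' % (url, count))


# e.g. CheckBypassThenRetry([(502, 'http://x'), (200, 'http://x')]) passes.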
Example #4
    def MeasurePage(self, page, tab, results):
        media_metric = tab.EvaluateJavaScript('window.__testMetrics')
        trace = media_metric['id'] if 'id' in media_metric else None
        metrics = media_metric['metrics'] if 'metrics' in media_metric else []
        for m in metrics:
            trace_name = '%s.%s' % (m, trace)
            if isinstance(metrics[m], list):
                results.AddValue(
                    list_of_scalar_values.ListOfScalarValues(
                        results.current_page,
                        trace_name,
                        units='ms',
                        values=[float(v) for v in metrics[m]],
                        important=True))

            else:
                results.AddValue(
                    scalar.ScalarValue(results.current_page,
                                       trace_name,
                                       units='ms',
                                       value=float(metrics[m]),
                                       important=True))
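
Example #4 (and Example #22 below) branch on whether a metric payload is a list or a single number before choosing ListOfScalarValues or ScalarValue. The coercion step can be factored out; a small sketch (the helper name is ours, not Telemetry's):

def CoerceMetric(raw):
    # Normalize a metric payload to (kind, value): a list of floats for
    # list-valued metrics, a single float otherwise.
    if isinstance(raw, list):
        return 'list', [float(v) for v in raw]
    return 'scalar', float(raw)


# CoerceMetric([1, 2]) -> ('list', [1.0, 2.0]); CoerceMetric('3') -> ('scalar', 3.0)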
Example #5
    def AddResultsForBypass(self, tab, results):
        bypass_count = 0
        for resp in self.IterResponses(tab):
            if resp.HasChromeProxyViaHeader():
                r = resp.response
                raise ChromeProxyMetricException(
                    '%s: Should not have Via header (%s) (refer=%s, status=%d)'
                    % (r.url, r.GetHeader('Via'), r.GetHeader('Referer'),
                       r.status))
            bypass_count += 1

        if tab:
            info = GetProxyInfoFromNetworkInternals(tab)
            if not info['enabled']:
                raise ChromeProxyMetricException(
                    'Chrome proxy should be enabled. proxy info: %s' % info)
            _, expected_bad_proxies = self.IsProxyBypassed(tab)
            self.VerifyBadProxies(info['badProxies'], expected_bad_proxies)

        results.AddValue(
            scalar.ScalarValue(results.current_page, 'bypass', 'count',
                               bypass_count))
Example #6
    def testAsDictWithOnePage(self):
        results = page_test_results.PageTestResults()
        results.telemetry_info.benchmark_start_epoch = 1501773200
        results.telemetry_info.benchmark_name = 'benchmark_name'
        results.WillRunPage(self._story_set[0])
        v0 = scalar.ScalarValue(
            results.current_page,
            'foo',
            'seconds',
            3,
            improvement_direction=improvement_direction.DOWN)
        results.AddValue(v0)
        results.DidRunPage(self._story_set[0])

        d = json_3_output_formatter.ResultsAsDict(results)

        self.assertTrue(_HasBenchmark(d['tests'], 'benchmark_name'))
        self.assertTrue(_HasStory(d['tests']['benchmark_name'], 'Foo'))
        story_result = d['tests']['benchmark_name']['Foo']
        self.assertEqual(story_result['actual'], 'PASS')
        self.assertEqual(story_result['expected'], 'PASS')
        self.assertEqual(d['num_failures_by_type'], {'PASS': 1})
Example #7
  def _ComputeFrameTimeDiscrepancy(self, page, stats):
    """Returns a Value for the absolute discrepancy of frame time stamps."""

    frame_discrepancy = None
    none_value_reason = None
    if self._HasEnoughFrames(stats.frame_timestamps):
      frame_discrepancy = round(statistics.TimestampsDiscrepancy(
          stats.frame_timestamps), 4)
    else:
      none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
    return scalar.ScalarValue(
        page, 'frame_time_discrepancy', 'ms', frame_discrepancy,
        description='Absolute discrepancy of frame time stamps, where '
                    'discrepancy is a measure of irregularity. It quantifies '
                    'the worst jank. For a single pause, discrepancy '
                    'corresponds to the length of this pause in milliseconds. '
                    'Consecutive pauses increase the discrepancy. This metric '
                    'is important because even if the mean and 95th '
                    'percentile are good, one long pause in the middle of an '
                    'interaction is still bad.',
        none_value_reason=none_value_reason,
        improvement_direction=improvement_direction.DOWN)
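
This compute-or-explain shape — return either a rounded number or a none_value_reason string, never both — recurs in Examples #12, #13, and #15 below. Distilled into a standalone helper (a sketch, not a Telemetry API):

def ComputeOrExplain(samples, compute, min_samples, reason, digits=4):
    # Returns (value, none_value_reason); exactly one of the two is None.
    if len(samples) >= min_samples:
        return round(compute(samples), digits), None
    return None, reason


# e.g. ComputeOrExplain([1.0, 2.0], max, 2, 'not enough frames') -> (2.0, None)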
Example #8
    def testPopulateHistogramSet_UsesScalarValueData(self):
        results = page_test_results.PageTestResults()
        results.telemetry_info.benchmark_start_epoch = 1501773200
        results.WillRunPage(self.pages[0])
        results.AddValue(
            scalar.ScalarValue(self.pages[0],
                               'a',
                               'seconds',
                               3,
                               improvement_direction=improvement_direction.UP))
        results.DidRunPage(self.pages[0])
        results.CleanUp()

        benchmark_metadata = benchmark.BenchmarkMetadata(
            'benchmark_name', 'benchmark_description')
        results.PopulateHistogramSet(benchmark_metadata)

        histogram_dicts = results.AsHistogramDicts()
        self.assertEqual(1, len(histogram_dicts))

        h = histogram_module.Histogram.FromDict(histogram_dicts[0])
        self.assertEqual('a', h.name)
Example #9
    def _AddResultsInternal(self, events, interactions, results, inputs):
        times_by_event_id = collections.defaultdict(list)

        for event in events:
            if not any(interaction.start <= event.start <= interaction.end
                       for interaction in interactions):
                continue
            event_id = TraceEventStatsInput.GetEventId(event.category,
                                                       event.name)
            times_by_event_id[event_id].append(
                self.ThreadDurationIfPresent(event))

        if not times_by_event_id:
            return

        inputs_by_event_id = {input_obj.event_id: input_obj
                              for input_obj in inputs}

        for event_name, times in times_by_event_id.items():
            input_for_event = inputs_by_event_id[event_name]
            name = input_for_event.metric_name
            results.AddValue(
                scalar.ScalarValue(page=results.current_page,
                                   tir_label=interactions[0].label,
                                   name=name + '-count',
                                   units='count',
                                   value=len(times),
                                   description='The number of times ' + name +
                                   ' was recorded.'))
            if not times:
                continue
            results.AddValue(
                list_of_scalar_values.ListOfScalarValues(
                    page=results.current_page,
                    tir_label=interactions[0].label,
                    name=name,
                    units=input_for_event.units,
                    values=times,
                    description=input_for_event.metric_description))
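
The heart of Example #9 is the grouping step: bucket event durations by a synthetic event id, keeping only events that fall inside some interaction. That step stands alone; a sketch with the lookups passed in as callables (names are ours, not Telemetry's):

import collections


def GroupDurations(events, in_any_interaction, event_id, duration):
    # Build {event_id: [durations]} for events inside some interaction.
    times_by_event_id = collections.defaultdict(list)
    for event in events:
        if not in_any_interaction(event):
            continue
        times_by_event_id[event_id(event)].append(duration(event))
    return times_by_event_id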
Example #10
    def testSkipValueCannotBeFiltered(self):
        def AcceptValueNamed_a(value, _):
            return value.name == 'a'

        results = page_test_results.PageTestResults(
            value_can_be_added_predicate=AcceptValueNamed_a)
        results.WillRunPage(self.pages[0])
        skip_value = skip.SkipValue(self.pages[0], 'skip for testing')
        results.AddValue(
            scalar.ScalarValue(self.pages[0],
                               'b',
                               'seconds',
                               8,
                               improvement_direction=improvement_direction.UP))
        results.AddValue(skip_value)
        results.DidRunPage(self.pages[0])
        results.PrintSummary()

        # Although the predicate says to accept only values named 'a', the
        # skip value is added anyway.
        self.assertEqual(len(results.all_page_specific_values), 1)
        self.assertIn(skip_value, results.all_page_specific_values)
Example #11
    def testPopulateHistogramSet_UsesScalarValueData(self):
        benchmark_metadata = benchmark.BenchmarkMetadata(
            'benchmark_name', 'benchmark_description')
        results = page_test_results.PageTestResults(
            benchmark_metadata=benchmark_metadata)
        results.telemetry_info.benchmark_start_epoch = 1501773200
        results.WillRunPage(self.pages[0])
        results.AddValue(
            scalar.ScalarValue(self.pages[0],
                               'a',
                               'seconds',
                               3,
                               improvement_direction=improvement_direction.UP))
        results.DidRunPage(self.pages[0])
        results.CleanUp()

        results.PopulateHistogramSet()

        hs = histogram_set.HistogramSet()
        hs.ImportDicts(results.AsHistogramDicts())
        self.assertEqual(1, len(hs))
        self.assertEqual('a', hs.GetFirstHistogram().name)
Example #12
    def _ComputeFirstGestureScrollUpdateLatency(self, page, stats):
        """Returns a Value for the first gesture scroll update latency."""
        first_gesture_scroll_update_latency = None
        none_value_reason = None
        if self._HasEnoughFrames(stats.frame_timestamps):
            latency_list = FlattenList(stats.gesture_scroll_update_latency)
            if not latency_list:
                return ()
            first_gesture_scroll_update_latency = round(latency_list[0], 4)
        else:
            none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
        return (scalar.ScalarValue(
            page,
            'first_gesture_scroll_update_latency',
            'ms',
            first_gesture_scroll_update_latency,
            description=
            'First gesture scroll update latency measures the time it '
            'takes to process the very first gesture scroll update '
            'input event. The first scroll gesture can often get '
            'delayed by work related to page loading.',
            none_value_reason=none_value_reason), )
Example #13
    def _ComputeMeanPixelsApproximated(self, page, stats):
        """Add the mean percentage of pixels approximated.

    This looks at tiles which are missing or of low or non-ideal resolution.
    """
        mean_pixels_approximated = None
        none_value_reason = None
        if self._HasEnoughFrames(stats.frame_timestamps):
            mean_pixels_approximated = round(
                statistics.ArithmeticMean(
                    perf_tests_helper.FlattenList(
                        stats.approximated_pixel_percentages)), 3)
        else:
            none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
        return scalar.ScalarValue(
            page,
            'mean_pixels_approximated',
            'percent',
            mean_pixels_approximated,
            description='Percentage of pixels that were approximated '
            '(checkerboarding, low-resolution tiles, etc.).',
            none_value_reason=none_value_reason)
Example #14
    def ValidateAndMeasurePage(self, page, tab, results):
        tab.WaitForJavaScriptCondition(
            'document.title.indexOf("Results") != -1', timeout=700)
        tab.WaitForDocumentReadyStateToBeComplete()

        self._power_metric.Stop(page, tab)
        self._power_metric.AddResults(tab, results)

        result_dict = json.loads(
            tab.EvaluateJavaScript("""
        var formElement = document.getElementsByTagName("input")[0];
        decodeURIComponent(formElement.value.split("?")[1]);
        """))
        total = 0
        for key in result_dict:
            if key == 'v':
                continue
            results.AddValue(
                list_of_scalar_values.ListOfScalarValues(
                    results.current_page,
                    key,
                    'ms',
                    result_dict[key],
                    important=False,
                    description=DESCRIPTIONS.get(key)))
            total += _Mean(result_dict[key])

        # TODO(tonyg/nednguyen): This measurement shouldn't calculate Total. The
        # results system should do that for us.
        results.AddValue(
            scalar.ScalarValue(
                results.current_page,
                'Total',
                'ms',
                total,
                description='Total of the means of the results for each type '
                'of benchmark in [Mozilla\'s Kraken JavaScript benchmark]'
                '(http://krakenbenchmark.mozilla.org/)'))
Example #15
    def _ComputeFrameTimeMetric(self, prefix, page, frame_timestamps,
                                frame_times):
        """Returns Values for the frame time metrics.

    This includes the raw and mean frame times, as well as the percentage of
    frames that were hitting 60 fps.
    """
        flatten_frame_times = None
        percentage_smooth = None
        none_value_reason = None
        if self._HasEnoughFrames(frame_timestamps):
            flatten_frame_times = perf_tests_helper.FlattenList(frame_times)
            # We use 17ms as a somewhat looser threshold, instead of 1000.0/60.0.
            smooth_threshold = 17.0
            smooth_count = sum(1 for t in flatten_frame_times
                               if t < smooth_threshold)
            percentage_smooth = float(smooth_count) / len(
                flatten_frame_times) * 100.0
        else:
            none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
        return (list_of_scalar_values.ListOfScalarValues(
            page,
            '%sframe_times' % prefix,
            'ms',
            flatten_frame_times,
            description='List of raw frame times, helpful to understand the '
            'other metrics.',
            none_value_reason=none_value_reason,
            improvement_direction=improvement_direction.DOWN),
                scalar.ScalarValue(
                    page,
                    '%spercentage_smooth' % prefix,
                    'score',
                    percentage_smooth,
                    description=
                    'Percentage of frames that were hitting 60 fps.',
                    none_value_reason=none_value_reason,
                    improvement_direction=improvement_direction.UP))
Example #16
    def testStoryRunSkipped(self):
        run = story_run.StoryRun(self.story)
        run.SetFailed('oops')
        run.Skip('test', is_expected=True)
        self.assertFalse(run.ok)
        self.assertFalse(run.failed)
        self.assertTrue(run.skipped)
        self.assertEqual(run.expected, 'SKIP')
        self.assertEqual(run.failure_str, 'oops')

        run = story_run.StoryRun(self.story)
        run.AddValue(
            scalar.ScalarValue(self.story,
                               'a',
                               's',
                               1,
                               improvement_direction=improvement_direction.UP))
        run.Skip('test', is_expected=False)
        self.assertFalse(run.ok)
        self.assertFalse(run.failed)
        self.assertTrue(run.skipped)
        self.assertEqual(run.expected, 'PASS')
        self.assertIsNone(run.failure_str)
Example #17
def TranslateScalarValue(scalar_value, page):
    # Parses scalarDicts created by:
    #   tracing/tracing/metrics/metric_map_function.html
    # back into ScalarValue's.
    value = scalar_value['numeric']['value']
    none_value_reason = None
    if value is None:
        none_value_reason = 'Common scalar contained None'
    elif value in ['Infinity', '-Infinity', 'NaN']:
        none_value_reason = 'value was %s' % value
        value = None
    name = scalar_value['name']
    unit_parts = scalar_value['numeric']['unit'].split('_')
    if len(unit_parts) != 2:
        raise ValueError('Must specify improvement direction for value ' +
                         name)
    return scalar.ScalarValue(page,
                              name,
                              unit_parts[0],
                              value,
                              description=scalar_value['description'],
                              none_value_reason=none_value_reason,
                              improvement_direction=_DIRECTION[unit_parts[1]])
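
TranslateScalarValue relies on the tracing metrics' unit naming convention, where the improvement direction is a suffix on the unit (e.g. 'ms_smallerIsBetter'); _DIRECTION maps the suffix back to a direction constant. A quick illustration of the split (the table below is a guess at the real _DIRECTION mapping, shown with plain strings):

# Hypothetical stand-in for the module-level _DIRECTION table.
_DIRECTION = {
    'smallerIsBetter': 'down',
    'biggerIsBetter': 'up',
}

unit, suffix = 'ms_smallerIsBetter'.split('_')
assert (unit, _DIRECTION[suffix]) == ('ms', 'down')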
Example #18
    def testFailureValueCannotBeFiltered(self):
        def AcceptValueNamed_a(value, _):
            return value.name == 'a'

        results = page_test_results.PageTestResults(
            value_can_be_added_predicate=AcceptValueNamed_a)
        results.WillRunPage(self.pages[0])
        results.AddValue(
            scalar.ScalarValue(self.pages[0],
                               'b',
                               'seconds',
                               8,
                               improvement_direction=improvement_direction.UP))
        failure_value = failure.FailureValue.FromMessage(
            self.pages[0], 'failure')
        results.AddValue(failure_value)
        results.DidRunPage(self.pages[0])
        results.PrintSummary()

        # Although the predicate says to accept only values named 'a', the
        # failure value is added anyway.
        self.assertEqual(len(results.all_page_specific_values), 1)
        self.assertIn(failure_value, results.all_page_specific_values)
Example #19
  def AddResultsForHTTPSBypass(self, tab, results):
    bypass_count = 0

    for resp in self.IterResponses(tab):
      # Only check HTTPS URLs.
      if "https://" not in resp.response.url:
        continue

      # If a Chrome Proxy Via header appears, fail the test.
      if resp.HasChromeProxyViaHeader():
        r = resp.response
        raise ChromeProxyMetricException(
            '%s: Should not have Via header (%s) (refer=%s, status=%d)' % (
                r.url, r.GetHeader('Via'), r.GetHeader('Referer'), r.status))
      bypass_count += 1

    if bypass_count == 0:
      raise ChromeProxyMetricException(
          'Expected at least one HTTPS response, but zero such '
          'responses were received.')

    results.AddValue(scalar.ScalarValue(
        results.current_page, 'bypass', 'count', bypass_count))
Example #20
  def ValidateAndMeasurePage(self, page, tab, results):
    tab.WaitForJavaScriptExpression(
        '!document.getElementById("start-performance-tests").disabled', 60)

    tab.ExecuteJavaScript("""
        window.__results = {};
        window.console.log = function(str) {
            if (!str) return;
            var key_val = str.split(': ');
            if (key_val.length != 2) return;
            __results[key_val[0]] = key_val[1];
        };
        document.getElementById('start-performance-tests').click();
        """)

    num_results = 0
    num_tests_in_spaceport = 24
    while num_results < num_tests_in_spaceport:
      tab.WaitForJavaScriptExpression(
          'Object.keys(window.__results).length > %d' % num_results, 180)
      num_results = tab.EvaluateJavaScript(
          'Object.keys(window.__results).length')
      logging.info('Completed test %d of %d', num_results,
                   num_tests_in_spaceport)

    # Parse the JSON-serialized results instead of eval()ing them.
    result_dict = json.loads(tab.EvaluateJavaScript(
        'JSON.stringify(window.__results)'))
    for key in result_dict:
      chart, trace = key.split('.', 1)
      results.AddValue(scalar.ScalarValue(
          results.current_page, '%s.%s'% (chart, trace),
          'objects (bigger is better)', float(result_dict[key]),
          important=False, description=DESCRIPTIONS.get(chart)))
    results.AddValue(list_of_scalar_values.ListOfScalarValues(
        results.current_page, 'Score', 'objects (bigger is better)',
        [float(x) for x in result_dict.values()],
        description='Combined score for all parts of the spaceport benchmark.'))
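
Example #20's collection loop is a poll: block until the page has published more results than last seen, then re-read the count. The same shape, detached from Telemetry and with an explicit deadline (names are illustrative):

import time


def WaitUntil(condition, timeout_s, poll_interval_s=0.5):
    # Poll `condition` until it returns truthy or `timeout_s` elapses.
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        if condition():
            return
        time.sleep(poll_interval_s)
    raise RuntimeError('Condition not met within %s seconds' % timeout_s)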
Example #21
    def AddResultsForHTTPFallback(self,
                                  tab,
                                  results,
                                  expected_proxies=None,
                                  expected_bad_proxies=None):
        info = GetProxyInfoFromNetworkInternals(tab)
        if 'enabled' not in info or not info['enabled']:
            raise ChromeProxyMetricException(
                'Chrome proxy should be enabled. proxy info: %s' % info)

        if not expected_proxies:
            expected_proxies = [
                self.effective_proxies['fallback'],
                self.effective_proxies['direct']
            ]
        if not expected_bad_proxies:
            expected_bad_proxies = []

        proxies = info['proxies']
        if proxies != expected_proxies:
            raise ChromeProxyMetricException(
                'Wrong effective proxies (%s). Expected: "%s"' %
                (str(proxies), str(expected_proxies)))

        bad_proxies = []
        if 'badProxies' in info and info['badProxies']:
            bad_proxies = [
                p['proxy'] for p in info['badProxies']
                if 'proxy' in p and p['proxy']
            ]
        if bad_proxies != expected_bad_proxies:
            raise ChromeProxyMetricException(
                'Wrong bad proxies (%s). Expected: "%s"' %
                (str(bad_proxies), str(expected_bad_proxies)))
        results.AddValue(
            scalar.ScalarValue(results.current_page, 'http_fallback',
                               'boolean', True))
Example #22
        def AddOneResult(metric, unit):
            if metric in exclude_metrics:
                return

            metrics = media_metric['metrics']
            for m in metrics:
                if m.startswith(metric):
                    special_label = m[len(metric):]
                    trace_name = '%s.%s%s' % (metric, trace, special_label)
                    if isinstance(metrics[m], list):
                        results.AddValue(
                            list_of_scalar_values.ListOfScalarValues(
                                results.current_page,
                                trace_name,
                                unit,
                                values=[float(v) for v in metrics[m]],
                                important=True))
                    else:
                        results.AddValue(
                            scalar.ScalarValue(results.current_page,
                                               trace_name,
                                               unit,
                                               value=float(metrics[m]),
                                               important=True))
Example #23
  def testBasicSummaryWithOnlyOnePage(self):
    page0 = self.pages[0]

    results = self.getPageTestResults()

    results.WillRunPage(page0)
    v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
                            improvement_direction=improvement_direction.UP)
    results.AddValue(v0)
    results.DidRunPage(page0)

    summary = summary_module.Summary(results)
    values = summary.interleaved_computed_per_page_values_and_summaries

    v0_list = list_of_scalar_values.ListOfScalarValues(
        page0, 'a', 'seconds', [3],
        improvement_direction=improvement_direction.UP)
    merged_list = list_of_scalar_values.ListOfScalarValues(
        None, 'a', 'seconds', [3],
        improvement_direction=improvement_direction.UP)

    self.assertEqual(2, len(values))
    self.assertIn(v0_list, values)
    self.assertIn(merged_list, values)
Example #24
    def testBasic(self):
        formatter = html_output_formatter.HtmlOutputFormatter(
            self._output, reset_results=False)
        results = page_test_results.PageTestResults()

        results.WillRunPage(self._story_set[0])
        v0 = scalar.ScalarValue(
            results.current_page,
            'foo',
            'seconds',
            3,
            improvement_direction=improvement_direction.DOWN)
        results.AddValue(v0)
        results.DidRunPage(self._story_set[0])
        results.PopulateHistogramSet()

        formatter.Format(results)
        html = self._output.getvalue()
        dicts = render_histograms_viewer.ReadExistingResults(html)
        histograms = histogram_set.HistogramSet()
        histograms.ImportDicts(dicts)

        self.assertEqual(len(histograms), 1)
        self.assertEqual(histograms.GetFirstHistogram().name, 'foo')
Example #25
    def ValidateAndMeasurePage(self, page, tab, results):
        # Wait until the page has loaded and come to a somewhat steady state.
        # Needs to be adjusted for every device (~2 seconds for workstation).
        time.sleep(self.options.start_wait_time)

        # Enqueue benchmark
        tab.ExecuteJavaScript("""
        window.benchmark_results = {};
        window.benchmark_results.done = false;
        chrome.gpuBenchmarking.runMicroBenchmark(
            "picture_record_benchmark",
            function(value) {
              window.benchmark_results.done = true;
              window.benchmark_results.results = value;
            }, [{width: 1, height: 1},
                {width: 250, height: 250},
                {width: 500, height: 500},
                {width: 750, height: 750},
                {width: 1000, height: 1000},
                {width: 256, height: 1024},
                {width: 1024, height: 256}]);
    """)

        tab.WaitForJavaScriptExpression('window.benchmark_results.done', 300)

        all_data = tab.EvaluateJavaScript('window.benchmark_results.results')
        for data in all_data:
            width = data['width']
            height = data['height']
            area = width * height
            time_ms = data['time_ms']

            results.AddValue(
                scalar.ScalarValue(results.current_page,
                                   'area_%07d_%dx%d' % (area, width, height),
                                   'ms', time_ms))
Example #26
    def ValidateAndMeasurePage(self, page, tab, results):
        del page  # unused
        timeline_data = tab.browser.platform.tracing_controller.StopTracing()

        # TODO(charliea): This is part of a three-sided Chromium/Telemetry patch
        # where we're changing the return type of StopTracing from a TraceValue to a
        # (TraceValue, nonfatal_exception_list) tuple. Once the tuple return value
        # lands in Chromium, the non-tuple logic should be deleted.
        if isinstance(timeline_data, tuple):
            timeline_data = timeline_data[0]

        timeline_model = model.TimelineModel(timeline_data)

        pt_avg = self.ComputeAverageOfDurations(
            timeline_model,
            'LayerTreeHostCommon::ComputeVisibleRectsWithPropertyTrees')

        results.AddValue(
            scalar.ScalarValue(
                results.current_page,
                'PT_avg_cost',
                'ms',
                pt_avg,
                description='Average time spent processing property trees'))
Example #27
    def AddResults(self, tab, results, chart_name=None):
        """Calculate the speed index and add it to the results."""
        try:
            if tab.video_capture_supported:
                index = self._impl.CalculateSpeedIndex(tab)
                none_value_reason = None
            else:
                index = None
                none_value_reason = 'Video capture is not supported.'
        finally:
            self._impl = None  # Release the tab so that it can be disconnected.

        results.AddValue(
            scalar.ScalarValue(
                results.current_page,
                '%s_speed_index' % chart_name,
                'ms',
                index,
                description=
                'Speed Index. This focuses on time when visible parts of '
                'page are displayed and shows the time when the '
                'first look is "almost" composed. If the contents of the '
                'testing page are composed by only static resources, load '
                'time can be measured more accurately and speed index will '
                'be smaller than the load time. On the other hand, if the '
                'contents are composed by many XHR requests with small '
                'main resource and javascript, speed index will be able to '
                'get the features of performance more accurately than load '
                'time because the load time will measure the time when '
                'static resources are loaded. If you want to get more '
                'detail, please refer to http://goo.gl/Rw3d5d. Currently '
                'there are two implementations: for Android and for '
                'Desktop. The Android version uses video capture; the '
                'Desktop one uses paint events and has extra overhead to '
                'catch paint events.',
                none_value_reason=none_value_reason))
Example #28
    def testDifferentPageMergeSingleValueStillMerges(self):
        page0 = self.pages[0]

        all_values = [
            scalar.ScalarValue(
                page0,
                'x',
                'units',
                1,
                improvement_direction=improvement_direction.DOWN)
        ]

        # With only one input value, the merged result's order is predictable
        # for the subsequent assertions.
        merged_values = merge_values.MergeLikeValuesFromDifferentPages(
            all_values)
        self.assertEqual(1, len(merged_values))

        self.assertEqual((None, 'x'),
                         (merged_values[0].page, merged_values[0].name))
        self.assertIsInstance(merged_values[0],
                              list_of_scalar_values.ListOfScalarValues)
        self.assertEqual([1], merged_values[0].values)
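
What the test in Example #28 asserts — same-named scalars from different pages collapse into a single page-less list value — can be mimicked with a plain group-by on the value name. A sketch over (page, name, value) triples rather than Telemetry value objects:

import collections


def MergeByName(values):
    # values: iterable of (page, name, value) triples. Returns {name: [values]}
    # with the page dimension dropped, mirroring the merged_values[0].page is
    # None check above.
    merged = collections.defaultdict(list)
    for _page, name, value in values:
        merged[name].append(value)
    return dict(merged)


assert MergeByName([('page0', 'x', 1)]) == {'x': [1]}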
Example #29
    def AddResults(self, tab, results, trace_name='cpu_utilization'):
        if not self._browser.supports_cpu_metrics:
            return

        assert self._stop_cpu, 'Must call Stop() first'
        cpu_stats = _SubtractCpuStats(self._stop_cpu, self._start_cpu)

        # FIXME: Renderer process CPU times are impossible to compare correctly.
        # http://crbug.com/419786#c11
        if 'Renderer' in cpu_stats:
            del cpu_stats['Renderer']

        # Add a result for each process type.
        for process_type in cpu_stats:
            trace_name_for_process = '%s_%s' % (trace_name,
                                                process_type.lower())
            cpu_percent = 100 * cpu_stats[process_type]
            results.AddValue(
                scalar.ScalarValue(results.current_page,
                                   'cpu_utilization.%s' %
                                   trace_name_for_process,
                                   '%',
                                   cpu_percent,
                                   important=False))
Example #30
  def AddResultsForHTTPFallback(self, tab, results):
    via_fallback_count = 0

    for resp in self.IterResponses(tab):
      if resp.ShouldHaveChromeProxyViaHeader():
        # All responses should have come through the HTTP fallback proxy, which
        # means that they should have the via header, and if a remote port is
        # defined, it should be port 80.
        if (not resp.HasChromeProxyViaHeader() or
            (resp.remote_port and resp.remote_port != 80)):
          r = resp.response
          raise ChromeProxyMetricException(
              '%s: Should have come through the fallback proxy.\n'
              'Response: remote_port=%s status=(%d, %s)\nHeaders:\n %s' % (
                  r.url, str(resp.remote_port), r.status, r.status_text,
                  r.headers))
        via_fallback_count += 1

    if via_fallback_count == 0:
      raise ChromeProxyMetricException(
          'Expected at least one response through the fallback proxy, but zero '
          'such responses were received.')
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'via_fallback', 'count', via_fallback_count))