Example #1
0
  def ValidateAndMeasurePage(self, page, tab, results):
    """Captures the page as a PNG, optionally checking for dynamic content.

    Waits for the document to finish loading, takes a base screenshot, and
    (when dynamic-content detection is enabled) compares extra screenshots
    against it, failing the measurement if too many pixels change. The final
    pixel buffer is written out as a PNG under self._png_outdir.
    """
    try:
      tab.WaitForDocumentReadyStateToBeComplete()
    except py_utils.TimeoutException:
      logging.warning("WaitForDocumentReadyStateToBeComplete() timeout, "
                      "page: %s", page.name)
      return

    time.sleep(self._wait_time)

    if not os.path.exists(self._png_outdir):
      logging.info("Creating directory %s", self._png_outdir)
      try:
        os.makedirs(self._png_outdir)
      except OSError:
        logging.warning("Directory %s could not be created", self._png_outdir)
        raise

    # Build the output path; escape win32 path separator char '\' as '\\'.
    png_path = os.path.abspath(
        os.path.join(self._png_outdir, page.file_safe_name)) + '.png'
    png_path = png_path.replace('\\', '\\\\')

    base_screenshot = tab.Screenshot()
    width = image_util.Width(base_screenshot)
    height = image_util.Height(base_screenshot)
    pixel_count = width * height
    pixels = image_util.Pixels(base_screenshot)

    # Dynamic content flag.
    if self._dc_detect:
      for shot_index in range(self._dc_extra_screenshots):
        logging.info("Sleeping for %f seconds.", self._dc_wait_time)
        time.sleep(self._dc_wait_time)

        # After the specified wait time, take another screenshot of the page.
        logging.info("Taking extra screenshot %d of %d.", shot_index + 1,
                     self._dc_extra_screenshots)
        extra_screenshot = tab.Screenshot()

        # Mark pixels that differ from the base shot, and fail if the
        # fraction of dynamic content exceeds the configured threshold.
        if not IsScreenshotWithinDynamicContentThreshold(
            base_screenshot, extra_screenshot, pixels, pixel_count,
            self._dc_threshold):
          raise legacy_page_test.MeasurementFailure("Percentage of pixels "
            "with dynamic content is greater than threshold.")

    # Convert the (possibly inconsistency-marked) pixel bytearray back into
    # an image before writing it out.
    image = image_util.FromRGBPixels(width, height, pixels)

    logging.info("Writing PNG file to %s. This may take awhile.", png_path)
    start = time.time()
    image_util.WritePngFile(image, png_path)
    logging.info("PNG file written successfully. (Took %f seconds)",
                 time.time() - start)
    def ValidateAndMeasurePage(self, page, tab, results):
        """Dumps the page to a multi-page SkPicture file via gpuBenchmarking.

        Raises MeasurementFailure on platforms that cannot print SkPictures.
        """
        if tab.browser.platform.GetOSName() in ['android', 'chromeos']:
            raise legacy_page_test.MeasurementFailure(
                'Multipage SkPicture printing not supported on this platform')

        mskp_path = os.path.abspath(
            os.path.join(self._mskp_outdir, page.file_safe_name + '.mskp'))
        # Escape win32 path separator char '\' as '\\' for the JS literal.
        tab.EvaluateJavaScript(_JS.format(mskp_path.replace('\\', '\\\\')))
Example #3
0
    def ValidateAndMeasurePage(self, page, tab, results):
        """Collects blink_perf results (and an optional trace) from the page.

        Waits for the in-page testRunner to finish or to request tracing,
        attaches any captured trace, then parses the page's "#log" element
        for 'values ... <units>' lines and reports them as list-of-scalar
        values.

        Raises:
            legacy_page_test.MeasurementFailure: if a 'values' line is empty.
        """
        tab.WaitForJavaScriptCondition(
            'testRunner.isDone || testRunner.isWaitingForTracingStart',
            timeout=600)
        trace_cpu_time_metrics = {}
        if tab.EvaluateJavaScript('testRunner.isWaitingForTracingStart'):
            trace_data = self._ContinueTestRunWithTracing(tab)
            # TODO(#763375): Rely on results.telemetry_info.trace_local_path/etc.
            kwargs = {}
            if hasattr(results.telemetry_info, 'trace_local_path'):
                kwargs['file_path'] = results.telemetry_info.trace_local_path
                kwargs['remote_path'] = results.telemetry_info.trace_remote_path
                kwargs['upload_bucket'] = results.telemetry_info.upload_bucket
                kwargs['cloud_url'] = results.telemetry_info.trace_remote_url
            trace_value = trace.TraceValue(page, trace_data, **kwargs)
            results.AddValue(trace_value)

            trace_events_to_measure = tab.EvaluateJavaScript(
                'window.testRunner.traceEventsToMeasure')
            model = model_module.TimelineModel(trace_data)
            renderer_thread = model.GetRendererThreadFromTabId(tab.id)
            trace_cpu_time_metrics = _ComputeTraceEventsThreadTimeForBlinkPerf(
                model, renderer_thread, trace_events_to_measure)

        log = tab.EvaluateJavaScript(
            'document.getElementById("log").innerHTML')

        for line in log.splitlines():
            if line.startswith("FATAL: "):
                # FIX: was the Python 2 statement `print line`, a syntax error
                # under Python 3; use the print() function as elsewhere here.
                print(line)
                continue
            if not line.startswith('values '):
                continue
            parts = line.split()
            values = [float(v.replace(',', '')) for v in parts[1:-1]]
            units = parts[-1]
            metric = page.name.split('.')[0].replace('/', '_')
            if values:
                results.AddValue(
                    list_of_scalar_values.ListOfScalarValues(
                        results.current_page, metric, units, values))
            else:
                raise legacy_page_test.MeasurementFailure('Empty test results')

            break

        print(log)

        self.PrintAndCollectTraceEventMetrics(trace_cpu_time_metrics, results)
    def ValidateAndMeasurePage(self, page, tab, results):
        """Prints the page to .skp files and records how many were saved.

        Raises MeasurementFailure on platforms that cannot print SkPictures.
        """
        if tab.browser.platform.GetOSName() in ['android', 'chromeos']:
            raise legacy_page_test.MeasurementFailure(
                'SkPicture printing not supported on this platform')

        skp_dir = os.path.abspath(
            os.path.join(self._skp_outdir, page.file_safe_name))
        # Escape win32 path separator char '\' as '\\'.
        skp_dir = skp_dir.replace('\\', '\\\\')
        tab.EvaluateJavaScript(
            'chrome.gpuBenchmarking.printToSkPicture({{ outpath }});',
            outpath=skp_dir)
        saved = glob.glob(os.path.join(skp_dir, '*.skp'))
        results.AddMeasurement('saved_picture_count', 'count', len(saved))
    def ValidateAndMeasurePage(self, page, tab, results):
        """Prints the page to .skp files and reports a saved-picture count.

        Raises MeasurementFailure on platforms that cannot print SkPictures.
        """
        if tab.browser.platform.GetOSName() in ['android', 'chromeos']:
            raise legacy_page_test.MeasurementFailure(
                'SkPicture printing not supported on this platform')

        skp_dir = os.path.abspath(
            os.path.join(self._skp_outdir, page.file_safe_name))
        # Escape win32 path separator char '\' as '\\' for the JS snippet.
        tab.EvaluateJavaScript(_JS.format(skp_dir.replace('\\', '\\\\')))
        saved = glob.glob(os.path.join(skp_dir, '*.skp'))
        results.AddValue(
            scalar.ScalarValue(results.current_page, 'saved_picture_count',
                               'count', len(saved)))
Example #6
0
    def RunPageInteractions(self, action_runner):
        """Runs cc's invalidation micro-benchmark while repainting the page.

        Schedules `invalidation_benchmark`, repaints continuously for five
        seconds, then signals the benchmark to finish.

        Raises:
            legacy_page_test.MeasurementFailure: if the micro-benchmark could
                not be scheduled.
        """
        action_runner.WaitForJavaScriptCondition(
            'document.readyState == "complete"', timeout=30)
        action_runner.ExecuteJavaScript(
            'chrome.gpuBenchmarking.setRasterizeOnlyVisibleContent();')

        # FIX: the original set `width = None` / `height = None` and then
        # guarded on them, so those branches were dead code; only the mode
        # was ever passed. The dead locals/branches are removed.
        args = {'mode': 'viewport'}

        # Enqueue benchmark
        action_runner.ExecuteJavaScript("""
        window.benchmark_results = {};
        window.benchmark_results.id =
            chrome.gpuBenchmarking.runMicroBenchmark(
                "invalidation_benchmark",
                function(value) {},
                {{ args }}
            );
        """,
                                        args=args)

        micro_benchmark_id = action_runner.EvaluateJavaScript(
            'window.benchmark_results.id')
        if not micro_benchmark_id:
            raise legacy_page_test.MeasurementFailure(
                'Failed to schedule invalidation_benchmark.')

        with action_runner.CreateInteraction('Repaint'):
            action_runner.RepaintContinuously(seconds=5)

        action_runner.ExecuteJavaScript("""
        window.benchmark_results.message_handled =
            chrome.gpuBenchmarking.sendMessageToMicroBenchmark(
                  {{ micro_benchmark_id }}, {
                    "notify_done": true
                  });
        """,
                                        micro_benchmark_id=micro_benchmark_id)
Example #7
0
def Repaint(action_runner, mode='viewport', width=None, height=None):
    """Runs cc's invalidation micro-benchmark while repainting the page.

    Schedules `invalidation_benchmark`, repaints continuously for five
    seconds, then signals the benchmark to finish.

    Args:
        action_runner: Telemetry action runner driving the page.
        mode: Invalidation mode string passed to the micro-benchmark.
        width: Optional width argument; only forwarded when truthy.
        height: Optional height argument; only forwarded when truthy.

    Raises:
        legacy_page_test.MeasurementFailure: if the micro-benchmark could not
            be scheduled.
    """
    action_runner.WaitForJavaScriptCondition(
        'document.readyState == "complete"', 90)
    # Rasterize only what's visible.
    action_runner.ExecuteJavaScript(
        'chrome.gpuBenchmarking.setRasterizeOnlyVisibleContent();')

    args = {}
    args['mode'] = mode
    if width:
        args['width'] = width
    if height:
        args['height'] = height

    # Enqueue benchmark
    # NOTE(review): `str(args)` splices Python's dict repr into the script;
    # this only yields valid JS for simple string/number values — see the
    # interpolation TODO further down.
    action_runner.ExecuteJavaScript("""
      window.benchmark_results = {};
      window.benchmark_results.id =
          chrome.gpuBenchmarking.runMicroBenchmark(
              "invalidation_benchmark",
              function(value) {},
              """ + str(args) + """
          );
  """)

    micro_benchmark_id = action_runner.EvaluateJavaScript(
        'window.benchmark_results.id')
    if not micro_benchmark_id:
        raise legacy_page_test.MeasurementFailure(
            'Failed to schedule invalidation_benchmark.')

    with action_runner.CreateInteraction('Repaint'):
        action_runner.RepaintContinuously(seconds=5)

    # TODO(catapult:#3028): Fix interpolation of JavaScript values.
    action_runner.ExecuteJavaScript("""
      window.benchmark_results.message_handled =
          chrome.gpuBenchmarking.sendMessageToMicroBenchmark(
              """ + str(micro_benchmark_id) + """, {
                "notify_done": true
              });
  """)
Example #8
0
    def ValidateAndMeasurePage(self, page, tab, results):
        """Stops tracing if active and parses blink_perf output from "#log".

        Reports each 'values ... <units>' line as a measurement; raises
        MeasurementFailure when a values line carries no numbers.
        """
        trace_cpu_time_metrics = {}
        if self._is_tracing:
            trace_data = tab.browser.platform.tracing_controller.StopTracing()
            results.AddTraces(trace_data)
            self._is_tracing = False

            events_to_measure = tab.EvaluateJavaScript(
                'window.testRunner.traceEventsToMeasure')
            if events_to_measure:
                model = model_module.TimelineModel(trace_data)
                renderer_thread = model.GetFirstRendererThread(tab.id)
                trace_cpu_time_metrics = _ComputeTraceEventsThreadTimeForBlinkPerf(
                    model, renderer_thread, events_to_measure)

        log_text = tab.EvaluateJavaScript(
            'document.getElementById("log").innerHTML')

        for row in log_text.splitlines():
            if row.startswith("FATAL: "):
                print(row)
                continue
            if not row.startswith('values '):
                continue
            tokens = row.split()
            measurements = [float(t.replace(',', '')) for t in tokens[1:-1]]
            metric = page.name.split('.')[0].replace('/', '_')
            if not measurements:
                raise legacy_page_test.MeasurementFailure('Empty test results')
            results.AddMeasurement(metric, tokens[-1], measurements)
            # Only the first 'values' line is consumed.
            break

        print(log_text)

        self.PrintAndCollectTraceEventMetrics(trace_cpu_time_metrics, results)
Example #9
0
    def ValidateAndMeasurePage(self, page, tab, results):
        """Waits for the page to load, then saves a PNG screenshot of it."""
        if not tab.screenshot_supported:
            raise legacy_page_test.MeasurementFailure(
                'Screenshotting not supported on this platform')

        try:
            tab.WaitForDocumentReadyStateToBeComplete()
        except py_utils.TimeoutException:
            logging.warning(
                "WaitForDocumentReadyStateToBeComplete() timeout, page: %s",
                page.display_name)
            return

        time.sleep(self._wait_time)

        out_dir = self._png_outdir
        if not os.path.exists(out_dir):
            logging.info("Creating directory %s", out_dir)
            try:
                os.makedirs(out_dir)
            except OSError:
                logging.warning("Directory %s could not be created", out_dir)
                raise

        # Escape win32 path separator char '\' as '\\'.
        png_path = (os.path.abspath(
            os.path.join(out_dir, page.file_safe_name)) + '.png').replace(
                '\\', '\\\\')

        shot = tab.Screenshot()

        logging.info("Writing PNG file to %s. This may take awhile.",
                     png_path)
        started_at = time.time()
        image_util.WritePngFile(shot, png_path)
        logging.info("PNG file written successfully. (Took %f seconds)",
                     time.time() - started_at)
Example #10
0
    def ValidateAndMeasurePage(self, page, tab, results):
        """Runs the rasterize_and_record micro-benchmark and reports scalars.

        Schedules the benchmark in the page, waits for completion, then
        records the timing/pixel/memory metrics it produced.
        """
        del page  # unused
        try:
            tab.WaitForDocumentReadyStateToBeComplete()
        except exceptions.TimeoutException:
            pass
        time.sleep(self._start_wait_time)

        # Enqueue benchmark
        tab.ExecuteJavaScript("""
        window.benchmark_results = {};
        window.benchmark_results.done = false;
        window.benchmark_results.id =
            chrome.gpuBenchmarking.runMicroBenchmark(
                "rasterize_and_record_benchmark",
                function(value) {
                  window.benchmark_results.done = true;
                  window.benchmark_results.results = value;
                }, {
                  "record_repeat_count": %i,
                  "rasterize_repeat_count": %i
                });
    """ % (self._record_repeat, self._rasterize_repeat))

        benchmark_id = tab.EvaluateJavaScript('window.benchmark_results.id')
        if not benchmark_id:
            raise legacy_page_test.MeasurementFailure(
                'Failed to schedule rasterize_and_record_micro')

        tab.WaitForJavaScriptExpression('window.benchmark_results.done',
                                        self._timeout)

        data = tab.EvaluateJavaScript('window.benchmark_results.results')

        def _report(name, units, value):
            # Record a single scalar against the page currently measured.
            results.AddValue(
                scalar.ScalarValue(results.current_page, name, units, value))

        _report('pixels_recorded', 'pixels', data['pixels_recorded'])
        _report('pixels_rasterized', 'pixels', data['pixels_rasterized'])
        _report('rasterize_time', 'ms', data['rasterize_time_ms'])
        _report('viewport_picture_size', 'bytes',
                data['picture_memory_usage'])
        _report('record_time', 'ms', data['record_time_ms'])

        _report('record_time_painting_disabled', 'ms',
                data['record_time_painting_disabled_ms'])
        _report('record_time_caching_disabled', 'ms',
                data['record_time_caching_disabled_ms'])
        _report('record_time_construction_disabled', 'ms',
                data['record_time_construction_disabled_ms'])
        # These two fields default to zero for reference builds that predate
        # them (see original TODO(wangxianzhu)/TODO(wkorman) workarounds).
        _report('record_time_subsequence_caching_disabled', 'ms',
                data.get('record_time_subsequence_caching_disabled_ms', 0))
        _report('record_time_partial_invalidation_ms', 'ms',
                data.get('record_time_partial_invalidation_ms', 0))

        if self._report_detailed_results:
            # NOTE(review): rasterize_results_.total_memory_usage arrives in a
            # field named |total_pictures_in_pile_size| (see original
            # TODO(wkorman)) — name kept for continuity.
            _report('total_size_of_pictures_in_piles', 'bytes',
                    data['total_pictures_in_pile_size'])
            _report('pixels_rasterized_with_non_solid_color', 'pixels',
                    data['pixels_rasterized_with_non_solid_color'])
            _report('pixels_rasterized_as_opaque', 'pixels',
                    data['pixels_rasterized_as_opaque'])
            _report('total_layers', 'count', data['total_layers'])
            _report('total_picture_layers', 'count',
                    data['total_picture_layers'])
            _report('total_picture_layers_with_no_content', 'count',
                    data['total_picture_layers_with_no_content'])
            _report('total_picture_layers_off_screen', 'count',
                    data['total_picture_layers_off_screen'])
Example #11
0
 def ValidateAndMeasurePage(self, page, tab, results):
     """Fails the measurement unless the page's query string is '?foo=1'."""
     expected = '?foo=1'
     query = tab.EvaluateJavaScript('window.location.search')
     if query.strip() == expected:
         return
     raise legacy_page_test.MeasurementFailure('query was %s, not %s.' %
                                               (query, expected))
Example #12
0
 def ValidateAndMeasurePage(self, page, tab, results):
     """Fails the measurement when the page is missing from the WPR archive.

     Web Page Replay returns '404 Not found' if a page is not in the archive.
     """
     body_text = tab.EvaluateJavaScript('document.body.textContent')
     if '404 Not Found' not in body_text.strip():
         return
     raise legacy_page_test.MeasurementFailure('Page not in archive.')
Example #13
0
 def ValidateAndMeasurePage(self, page, tab, results):
     """Checks that the page body is exactly 'Hello world'."""
     body_text = tab.EvaluateJavaScript('document.body.textContent')
     if body_text.strip() == 'Hello world':
         return
     raise legacy_page_test.MeasurementFailure('Page contents were: ' +
                                               body_text)
    def ValidateAndMeasurePage(self, page, tab, results):
        """Runs the rasterize_and_record micro-benchmark and reports scalars.

        Schedules the benchmark in the page, waits for it to finish, then
        records its timing/pixel/memory metrics as scalar values.
        """
        del page  # unused
        try:
            tab.WaitForDocumentReadyStateToBeComplete()
        except py_utils.TimeoutException:
            pass
        time.sleep(self._start_wait_time)

        # Enqueue benchmark
        tab.ExecuteJavaScript("""
        window.benchmark_results = {};
        window.benchmark_results.done = false;
        window.benchmark_results.id =
            chrome.gpuBenchmarking.runMicroBenchmark(
                "rasterize_and_record_benchmark",
                function(value) {
                  window.benchmark_results.done = true;
                  window.benchmark_results.results = value;
                }, {
                  "record_repeat_count": {{ record_repeat_count }},
                  "rasterize_repeat_count": {{ rasterize_repeat_count }}
                });
        """,
                              record_repeat_count=self._record_repeat,
                              rasterize_repeat_count=self._rasterize_repeat)

        # Evaluating this expression usually takes between 60 and 90 seconds.
        benchmark_id = tab.EvaluateJavaScript('window.benchmark_results.id',
                                              timeout=self._timeout)
        if not benchmark_id:
            raise legacy_page_test.MeasurementFailure(
                'Failed to schedule rasterize_and_record_micro')

        tab.WaitForJavaScriptCondition('window.benchmark_results.done',
                                       timeout=self._timeout)

        data = tab.EvaluateJavaScript('window.benchmark_results.results')

        def _emit(name, units, value):
            # Report one scalar against the page currently being measured.
            results.AddValue(
                scalar.ScalarValue(results.current_page, name, units, value))

        _emit('pixels_recorded', 'pixels', data['pixels_recorded'])
        _emit('pixels_rasterized', 'pixels', data['pixels_rasterized'])
        _emit('rasterize_time', 'ms', data['rasterize_time_ms'])
        _emit('record_time', 'ms', data['record_time_ms'])
        # The paint-op fields may be absent on older builds; default to zero.
        _emit('painter_memory_usage', 'bytes',
              data.get('painter_memory_usage', 0))
        _emit('paint_op_memory_usage', 'bytes',
              data.get('paint_op_memory_usage', 0))
        _emit('paint_op_count', 'count', data.get('paint_op_count', 0))

        _emit('record_time_painting_disabled', 'ms',
              data['record_time_painting_disabled_ms'])
        _emit('record_time_caching_disabled', 'ms',
              data['record_time_caching_disabled_ms'])
        _emit('record_time_construction_disabled', 'ms',
              data['record_time_construction_disabled_ms'])
        _emit('record_time_subsequence_caching_disabled', 'ms',
              data['record_time_subsequence_caching_disabled_ms'])
        _emit('record_time_partial_invalidation_ms', 'ms',
              data['record_time_partial_invalidation_ms'])

        if self._report_detailed_results:
            _emit('pixels_rasterized_with_non_solid_color', 'pixels',
                  data['pixels_rasterized_with_non_solid_color'])
            _emit('pixels_rasterized_as_opaque', 'pixels',
                  data['pixels_rasterized_as_opaque'])
            _emit('total_layers', 'count', data['total_layers'])
            _emit('total_picture_layers', 'count',
                  data['total_picture_layers'])
            _emit('total_picture_layers_with_no_content', 'count',
                  data['total_picture_layers_with_no_content'])
            _emit('total_picture_layers_off_screen', 'count',
                  data['total_picture_layers_off_screen'])
Example #15
0
  def ValidateAndMeasurePage(self, page, tab, results):
    """Runs the rasterize_and_record micro-benchmark and records metrics.

    Schedules the benchmark in the page, waits for completion, then records
    pixel/memory counts, timing metrics, and (optionally) detailed layer and
    LCD-text statistics.
    """
    del page  # unused
    try:
      tab.WaitForDocumentReadyStateToBeComplete()
    except py_utils.TimeoutException:
      pass
    time.sleep(self._start_wait_time)

    # Enqueue benchmark
    tab.ExecuteJavaScript("""
        window.benchmark_results = {};
        window.benchmark_results.done = false;
        window.benchmark_results.id =
            chrome.gpuBenchmarking.runMicroBenchmark(
                "rasterize_and_record_benchmark",
                function(value) {
                  window.benchmark_results.done = true;
                  window.benchmark_results.results = value;
                }, {
                  "record_repeat_count": {{ record_repeat_count }},
                  "rasterize_repeat_count": {{ rasterize_repeat_count }}
                });
        """,
        record_repeat_count=self._record_repeat,
        rasterize_repeat_count=self._rasterize_repeat)

    # Evaluating this expression usually takes between 60 and 90 seconds.
    benchmark_id = tab.EvaluateJavaScript(
        'window.benchmark_results.id', timeout=self._timeout)
    if not benchmark_id:
      raise legacy_page_test.MeasurementFailure(
          'Failed to schedule rasterize_and_record_micro')

    tab.WaitForJavaScriptCondition(
        'window.benchmark_results.done', timeout=self._timeout)

    data = tab.EvaluateJavaScript('window.benchmark_results.results')

    results.AddMeasurement('pixels_recorded', 'count',
                           data['pixels_recorded'])
    results.AddMeasurement('pixels_rasterized', 'count',
                           data['pixels_rasterized'])
    # Paint-op fields may be missing on older builds; default them to zero.
    results.AddMeasurement('painter_memory_usage', 'bytes',
                           data.get('painter_memory_usage', 0))
    results.AddMeasurement('paint_op_memory_usage', 'bytes',
                           data.get('paint_op_memory_usage', 0))
    results.AddMeasurement('paint_op_count', 'count',
                           data.get('paint_op_count', 0))

    timing_names = ('rasterize_time', 'record_time',
                    'record_time_caching_disabled',
                    'record_time_subsequence_caching_disabled',
                    'record_time_partial_invalidation',
                    'raster_invalidation_and_convert_time',
                    'paint_artifact_compositor_update_time')
    for name in timing_names:
      # Each timing metric is stored under '<name>_ms'; missing ones are 0.
      results.AddMeasurement(name, 'ms', data.get(name + '_ms', 0))

    if self._report_detailed_results:
      detailed_names = ('pixels_rasterized_with_non_solid_color',
                        'pixels_rasterized_as_opaque', 'total_layers',
                        'total_picture_layers',
                        'total_picture_layers_with_no_content',
                        'total_picture_layers_off_screen')
      for name in detailed_names:
        results.AddMeasurement(name, 'count', data[name])

      # Bucket visible pixels by the reason LCD text was disallowed; the
      # 'none' bucket means LCD text was allowed.
      by_reason = data['visible_pixels_by_lcd_text_disallowed_reason']
      for reason, pixel_count in by_reason.items():
        if reason == 'none':
          label = 'visible_pixels_lcd_text'
        else:
          label = 'visible_pixels_non_lcd_text:' + reason
        results.AddMeasurement(label, 'count', pixel_count)