Example #1
    def AddResults(self, model, renderer_thread, interaction_record, results):
        renderer_process = renderer_thread.parent
        time_bounds = bounds.Bounds()
        time_bounds.AddValue(interaction_record.start)
        time_bounds.AddValue(interaction_record.end)
        stats = rendering_stats.RenderingStats(renderer_process,
                                               model.browser_process,
                                               [time_bounds])
        if stats.mouse_wheel_scroll_latency:
            mean_mouse_wheel_scroll_latency = statistics.ArithmeticMean(
                stats.mouse_wheel_scroll_latency)
            mouse_wheel_scroll_latency_discrepancy = statistics.DurationsDiscrepancy(
                stats.mouse_wheel_scroll_latency)
            results.Add('mean_mouse_wheel_scroll_latency', 'ms',
                        round(mean_mouse_wheel_scroll_latency, 3))
            results.Add('mouse_wheel_scroll_latency_discrepancy', '',
                        round(mouse_wheel_scroll_latency_discrepancy, 4))

        if stats.touch_scroll_latency:
            mean_touch_scroll_latency = statistics.ArithmeticMean(
                stats.touch_scroll_latency)
            touch_scroll_latency_discrepancy = statistics.DurationsDiscrepancy(
                stats.touch_scroll_latency)
            results.Add('mean_touch_scroll_latency', 'ms',
                        round(mean_touch_scroll_latency, 3))
            results.Add('touch_scroll_latency_discrepancy', '',
                        round(touch_scroll_latency_discrepancy, 4))

        if stats.js_touch_scroll_latency:
            mean_js_touch_scroll_latency = statistics.ArithmeticMean(
                stats.js_touch_scroll_latency)
            js_touch_scroll_latency_discrepancy = statistics.DurationsDiscrepancy(
                stats.js_touch_scroll_latency)
            results.Add('mean_js_touch_scroll_latency', 'ms',
                        round(mean_js_touch_scroll_latency, 3))
            results.Add('js_touch_scroll_latency_discrepancy', '',
                        round(js_touch_scroll_latency_discrepancy, 4))

        # List of raw frame times.
        frame_times = FlattenList(stats.frame_times)
        results.Add('frame_times', 'ms', frame_times)

        # Arithmetic mean of frame times.
        mean_frame_time = statistics.ArithmeticMean(frame_times)
        results.Add('mean_frame_time', 'ms', round(mean_frame_time, 3))

        # Absolute discrepancy of frame time stamps.
        frame_discrepancy = statistics.TimestampsDiscrepancy(
            stats.frame_timestamps)
        results.Add('jank', 'ms', round(frame_discrepancy, 4))

        # Are we hitting 60 fps for 95 percent of all frames?
        # We use 19ms as a somewhat looser threshold, instead of 1000.0/60.0.
        percentile_95 = statistics.Percentile(frame_times, 95.0)
        results.Add('mostly_smooth', 'score',
                    1.0 if percentile_95 < 19.0 else 0.0)
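
The mostly_smooth score above reduces to a 95th-percentile check against a 19 ms threshold. Below is a minimal sketch of that check in plain Python, with a made-up list of frame times and a simple interpolated percentile standing in for Telemetry's statistics.Percentile.

# Sketch only: a stand-alone version of the mostly_smooth check above.
# frame_times_ms is a hypothetical sample, not data from the example.
frame_times_ms = [14.2, 15.8, 16.1, 17.0, 16.4, 33.5, 15.9, 16.2]

def percentile(values, p):
    # Simple interpolated percentile; not Telemetry's statistics.Percentile.
    ordered = sorted(values)
    k = (len(ordered) - 1) * (p / 100.0)
    lower = int(k)
    upper = min(lower + 1, len(ordered) - 1)
    return ordered[lower] + (ordered[upper] - ordered[lower]) * (k - lower)

percentile_95 = percentile(frame_times_ms, 95.0)
mostly_smooth = 1.0 if percentile_95 < 19.0 else 0.0
print(percentile_95, mostly_smooth)  # 0.0 here: the 95th percentile is well above 19 ms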
Example #2
 def _ComputeLatencyMetric(self, page, stats, name, list_of_latency_lists):
     """Returns Values for the mean and discrepancy for given latency stats."""
     mean_latency = None
     latency_discrepancy = None
     none_value_reason = None
     if self._HasEnoughFrames(stats.frame_timestamps):
         latency_list = perf_tests_helper.FlattenList(list_of_latency_lists)
         if len(latency_list) == 0:
             return ()
         mean_latency = round(statistics.ArithmeticMean(latency_list), 3)
         latency_discrepancy = (round(
             statistics.DurationsDiscrepancy(latency_list), 4))
     else:
         none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
     return (scalar.ScalarValue(
         page,
         'mean_%s' % name,
         'ms',
         mean_latency,
         description='Arithmetic mean of the raw %s values' % name,
         none_value_reason=none_value_reason),
             scalar.ScalarValue(
                 page,
                 '%s_discrepancy' % name,
                 'ms',
                 latency_discrepancy,
                 description='Discrepancy of the raw %s values' % name,
                 none_value_reason=none_value_reason))
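
Unlike Example #1, this helper returns the values instead of adding them to results directly, and threads none_value_reason through when there are not enough frames. The sketch below mirrors that shape with plain-Python stand-ins; FakeScalar and the max-based discrepancy placeholder are inventions for illustration, not Telemetry code.

from collections import namedtuple

# Hypothetical stand-in for telemetry's scalar.ScalarValue, used only to show
# the shape of the tuple the helper above returns.
FakeScalar = namedtuple('FakeScalar', 'page name units value none_value_reason')

def compute_latency_metric(page, name, latency_list, has_enough_frames):
    if not has_enough_frames:
        reason = 'Not enough frames.'
        return (FakeScalar(page, 'mean_%s' % name, 'ms', None, reason),
                FakeScalar(page, '%s_discrepancy' % name, 'ms', None, reason))
    if not latency_list:
        return ()  # same early-out as the real helper
    mean_latency = round(sum(latency_list) / len(latency_list), 3)
    # Placeholder only: the real metric comes from statistics.DurationsDiscrepancy.
    discrepancy = round(max(latency_list), 4)
    return (FakeScalar(page, 'mean_%s' % name, 'ms', mean_latency, None),
            FakeScalar(page, '%s_discrepancy' % name, 'ms', discrepancy, None))

for value in compute_latency_metric('page1', 'input_event_latency',
                                    [12.0, 15.5, 9.8], True):
    print(value)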
Example #3
  def testDurationsDiscrepancy(self):
    durations = []
    d = statistics.DurationsDiscrepancy(durations)
    self.assertEqual(d, 0.0)

    durations = [4]
    d = statistics.DurationsDiscrepancy(durations)
    self.assertEqual(d, 4.0)

    durations_a = [1, 1, 1, 1, 1]
    durations_b = [1, 1, 2, 1, 1]
    durations_c = [1, 2, 1, 2, 1]

    d_a = statistics.DurationsDiscrepancy(durations_a)
    d_b = statistics.DurationsDiscrepancy(durations_b)
    d_c = statistics.DurationsDiscrepancy(durations_c)

    self.assertTrue(d_a < d_b < d_c)
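
The assertions capture three properties: an empty duration list has zero discrepancy, a single duration's discrepancy equals that duration, and more irregular sequences score strictly higher. The sketch below packages the first two cases as a standalone unittest module; the import path is an assumption about where Telemetry's statistics helpers live.

# Sketch only: the same edge cases as a runnable unittest module.
# The import path is an assumption; in Chromium's Telemetry the statistics
# helpers have lived under telemetry.util.statistics.
import unittest

from telemetry.util import statistics


class DurationsDiscrepancyEdgeCaseTest(unittest.TestCase):

  def testEmptyAndSingleDuration(self):
    self.assertEqual(statistics.DurationsDiscrepancy([]), 0.0)
    self.assertEqual(statistics.DurationsDiscrepancy([4]), 4.0)


if __name__ == '__main__':
  unittest.main()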
Example #4
    def AddResults(self, model, renderer_thread, interaction_records, results):
        self.VerifyNonOverlappedRecords(interaction_records)
        renderer_process = renderer_thread.parent
        stats = rendering_stats.RenderingStats(
            renderer_process, model.browser_process,
            [r.GetBounds() for r in interaction_records])

        input_event_latency = FlattenList(stats.input_event_latency)
        if input_event_latency:
            mean_input_event_latency = statistics.ArithmeticMean(
                input_event_latency)
            input_event_latency_discrepancy = statistics.DurationsDiscrepancy(
                input_event_latency)
            results.Add('mean_input_event_latency', 'ms',
                        round(mean_input_event_latency, 3))
            results.Add('input_event_latency_discrepancy', 'ms',
                        round(input_event_latency_discrepancy, 4))

        # List of raw frame times.
        frame_times = FlattenList(stats.frame_times)
        results.Add('frame_times', 'ms', frame_times)

        # Arithmetic mean of frame times.
        mean_frame_time = statistics.ArithmeticMean(frame_times)
        results.Add('mean_frame_time', 'ms', round(mean_frame_time, 3))

        # Absolute discrepancy of frame time stamps.
        frame_discrepancy = statistics.TimestampsDiscrepancy(
            stats.frame_timestamps)
        results.Add('jank', 'ms', round(frame_discrepancy, 4))

        # Are we hitting 60 fps for 95 percent of all frames?
        # We use 19ms as a somewhat looser threshold, instead of 1000.0/60.0.
        percentile_95 = statistics.Percentile(frame_times, 95.0)
        results.Add('mostly_smooth', 'score',
                    1.0 if percentile_95 < 19.0 else 0.0)

        # Mean percentage of pixels approximated (missing tiles, low resolution
        # tiles, non-ideal resolution tiles)
        results.Add(
            'mean_pixels_approximated', 'percent',
            round(
                statistics.ArithmeticMean(
                    FlattenList(stats.approximated_pixel_percentages)), 3))
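
FlattenList, imported from perf_tests_helper in Example #2, just concatenates the per-interaction lists into one flat sample before the statistics are computed. A minimal stand-in, in case you want to follow the arithmetic without the Telemetry tree:

def FlattenList(values):
    # Minimal stand-in for perf_tests_helper.FlattenList: concatenate a list
    # of lists into a single flat list.
    return [item for sublist in values for item in sublist]

# Per-interaction frame times collapse into one sample before averaging:
print(FlattenList([[16.7, 16.6], [33.4], []]))  # [16.7, 16.6, 33.4]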
Example #5
    def AddResults(self, model, renderer_thread, interaction_records, results):
        self.VerifyNonOverlappedRecords(interaction_records)
        renderer_process = renderer_thread.parent
        stats = rendering_stats.RenderingStats(
            renderer_process, model.browser_process,
            [r.GetBounds() for r in interaction_records])

        input_event_latency = FlattenList(stats.input_event_latency)
        if input_event_latency:
            mean_input_event_latency = statistics.ArithmeticMean(
                input_event_latency)
            input_event_latency_discrepancy = statistics.DurationsDiscrepancy(
                input_event_latency)
            results.AddValue(
                scalar.ScalarValue(results.current_page,
                                   'mean_input_event_latency', 'ms',
                                   round(mean_input_event_latency, 3)))
            results.AddValue(
                scalar.ScalarValue(results.current_page,
                                   'input_event_latency_discrepancy', 'ms',
                                   round(input_event_latency_discrepancy, 4)))
        scroll_update_latency = FlattenList(stats.scroll_update_latency)
        if scroll_update_latency:
            mean_scroll_update_latency = statistics.ArithmeticMean(
                scroll_update_latency)
            scroll_update_latency_discrepancy = statistics.DurationsDiscrepancy(
                scroll_update_latency)
            results.AddValue(
                scalar.ScalarValue(results.current_page,
                                   'mean_scroll_update_latency', 'ms',
                                   round(mean_scroll_update_latency, 3)))
            results.AddValue(
                scalar.ScalarValue(results.current_page,
                                   'scroll_update_latency_discrepancy', 'ms',
                                   round(scroll_update_latency_discrepancy,
                                         4)))
        gesture_scroll_update_latency = FlattenList(
            stats.gesture_scroll_update_latency)
        if gesture_scroll_update_latency:
            results.AddValue(
                scalar.ScalarValue(results.current_page,
                                   'first_gesture_scroll_update_latency', 'ms',
                                   round(gesture_scroll_update_latency[0], 4)))

        # List of queueing durations.
        frame_queueing_durations = FlattenList(stats.frame_queueing_durations)
        if frame_queueing_durations:
            results.AddValue(
                list_of_scalar_values.ListOfScalarValues(
                    results.current_page, 'queueing_durations', 'ms',
                    frame_queueing_durations))

        # List of raw frame times.
        frame_times = FlattenList(stats.frame_times)
        results.AddValue(
            list_of_scalar_values.ListOfScalarValues(
                results.current_page,
                'frame_times',
                'ms',
                frame_times,
                description=
                'List of raw frame times, helpful to understand the other '
                'metrics.'))

        # Arithmetic mean of frame times.
        mean_frame_time = statistics.ArithmeticMean(frame_times)
        results.AddValue(
            scalar.ScalarValue(results.current_page,
                               'mean_frame_time',
                               'ms',
                               round(mean_frame_time, 3),
                               description='Arithmetic mean of frame times.'))

        # Absolute discrepancy of frame time stamps.
        frame_discrepancy = statistics.TimestampsDiscrepancy(
            stats.frame_timestamps)
        results.AddValue(
            scalar.ScalarValue(
                results.current_page,
                'jank',
                'ms',
                round(frame_discrepancy, 4),
                description='Absolute discrepancy of frame time stamps, where '
                'discrepancy is a measure of irregularity. It quantifies '
                'the worst jank. For a single pause, discrepancy '
                'corresponds to the length of this pause in milliseconds. '
                'Consecutive pauses increase the discrepancy. This metric '
                'is important because even if the mean and 95th '
                'percentile are good, one long pause in the middle of an '
                'interaction is still bad.'))

        # Are we hitting 60 fps for 95 percent of all frames?
        # We use 19ms as a somewhat looser threshold, instead of 1000.0/60.0.
        percentile_95 = statistics.Percentile(frame_times, 95.0)
        results.AddValue(
            scalar.ScalarValue(
                results.current_page,
                'mostly_smooth',
                'score',
                1.0 if percentile_95 < 19.0 else 0.0,
                description='Were 95 percent of the frames hitting 60 fps? '
                'Boolean value (1/0).'))

        # Mean percentage of pixels approximated (missing tiles, low resolution
        # tiles, non-ideal resolution tiles).
        results.AddValue(
            scalar.ScalarValue(
                results.current_page,
                'mean_pixels_approximated',
                'percent',
                round(
                    statistics.ArithmeticMean(
                        FlattenList(stats.approximated_pixel_percentages)), 3),
                description='Percentage of pixels that were approximated '
                '(checkerboarding, low-resolution tiles, etc.).'))
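
This version reports everything through value objects (scalar.ScalarValue, list_of_scalar_values.ListOfScalarValues) passed to results.AddValue rather than through results.Add. The sketch below uses throwaway stand-ins, not the real Telemetry classes, just to make that calling pattern concrete.

from collections import namedtuple

# Throwaway stand-ins for the Telemetry value classes and results object used
# above; they are not the real API, only enough to show the calling pattern.
ScalarValue = namedtuple('ScalarValue', 'page name units value description')
ListOfScalarValues = namedtuple('ListOfScalarValues', 'page name units values')

class FakeResults(object):
    def __init__(self, current_page):
        self.current_page = current_page
        self.values = []

    def AddValue(self, value):
        self.values.append(value)

results = FakeResults(current_page='http://example.test')
frame_times = [16.7, 16.6, 33.4]
results.AddValue(ListOfScalarValues(
    results.current_page, 'frame_times', 'ms', frame_times))
results.AddValue(ScalarValue(
    results.current_page, 'mean_frame_time', 'ms',
    round(sum(frame_times) / len(frame_times), 3),
    'Arithmetic mean of frame times.'))
for value in results.values:
    print(value)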