def testDiscrepancyMultipleRanges(self):
  """Discrepancy over multiple ranges is the max over the individual ranges."""
  samples = [[0.0, 1.2, 2.3, 3.3], [6.3, 7.5, 8.4], [4.2, 5.4, 5.9]]
  d_0 = statistics.TimestampsDiscrepancy(samples[0])
  d_1 = statistics.TimestampsDiscrepancy(samples[1])
  d_2 = statistics.TimestampsDiscrepancy(samples[2])
  d = statistics.TimestampsDiscrepancy(samples)
  # assertEquals is a deprecated alias (removed in Python 3.12);
  # use assertEqual.
  self.assertEqual(d, max(d_0, d_1, d_2))
def testTimestampsDiscrepancy(self):
  """Checks ordering properties of discrepancy on hand-picked timestamps."""
  # Empty input yields zero absolute discrepancy.
  time_stamps = []
  d_abs = statistics.TimestampsDiscrepancy(time_stamps, True)
  self.assertEqual(d_abs, 0.0)
  # A single timestamp yields 0.5.
  time_stamps = [4]
  d_abs = statistics.TimestampsDiscrepancy(time_stamps, True)
  self.assertEqual(d_abs, 0.5)
  time_stamps_a = [0, 1, 2, 3, 5, 6]
  time_stamps_b = [0, 1, 2, 3, 5, 7]
  time_stamps_c = [0, 2, 3, 4]
  time_stamps_d = [0, 2, 3, 4, 5]
  d_abs_a = statistics.TimestampsDiscrepancy(time_stamps_a, True)
  d_abs_b = statistics.TimestampsDiscrepancy(time_stamps_b, True)
  d_abs_c = statistics.TimestampsDiscrepancy(time_stamps_c, True)
  d_abs_d = statistics.TimestampsDiscrepancy(time_stamps_d, True)
  d_rel_a = statistics.TimestampsDiscrepancy(time_stamps_a, False)
  d_rel_b = statistics.TimestampsDiscrepancy(time_stamps_b, False)
  d_rel_c = statistics.TimestampsDiscrepancy(time_stamps_c, False)
  d_rel_d = statistics.TimestampsDiscrepancy(time_stamps_d, False)
  # b has a larger gap (5 -> 7) than a (5 -> 6), so both absolute and
  # relative discrepancy are larger for b.
  # Deprecated assertEquals/assertAlmostEquals aliases (removed in
  # Python 3.12) replaced with assertEqual/assertAlmostEqual.
  self.assertTrue(d_abs_a < d_abs_b)
  self.assertTrue(d_rel_a < d_rel_b)
  self.assertTrue(d_rel_d < d_rel_c)
  self.assertAlmostEqual(d_abs_d, d_abs_c)
def _ComputeFrameTimeDiscrepancy(self, page, stats):
  """Returns a Value for the absolute discrepancy of frame time stamps."""
  # Only compute the metric when there are enough frames; otherwise report
  # None together with the reason.
  if self._HasEnoughFrames(stats.frame_timestamps):
    discrepancy = round(
        statistics.TimestampsDiscrepancy(stats.frame_timestamps), 4)
    reason = None
  else:
    discrepancy = None
    reason = NOT_ENOUGH_FRAMES_MESSAGE
  return scalar.ScalarValue(
      page, 'frame_time_discrepancy', 'ms', discrepancy,
      description='Absolute discrepancy of frame time stamps, where '
                  'discrepancy is a measure of irregularity. It quantifies '
                  'the worst jank. For a single pause, discrepancy '
                  'corresponds to the length of this pause in milliseconds. '
                  'Consecutive pauses increase the discrepancy. This metric '
                  'is important because even if the mean and 95th '
                  'percentile are good, one long pause in the middle of an '
                  'interaction is still bad.',
      none_value_reason=reason,
      improvement_direction=improvement_direction.DOWN)
def testDiscrepancyAnalytic(self):
  """Computes discrepancy for sample sets with known statistics."""
  # Deprecated assertEquals/assertAlmostEquals aliases (removed in
  # Python 3.12) replaced with assertEqual/assertAlmostEqual.
  interval_multiplier = 100000
  # Empty sample set has maximal discrepancy.
  samples = []
  d = statistics.Discrepancy(samples, interval_multiplier)
  self.assertEqual(d, 1.0)
  samples = [0.5]
  d = statistics.Discrepancy(samples, interval_multiplier)
  self.assertEqual(round(d), 1.0)
  samples = [0.0, 1.0]
  d = statistics.Discrepancy(samples, interval_multiplier)
  self.assertAlmostEqual(round(d, 2), 1.0)
  # Evenly repeated samples still cover the interval poorly.
  samples = [0.5, 0.5, 0.5]
  d = statistics.Discrepancy(samples, interval_multiplier)
  self.assertAlmostEqual(d, 1.0)
  # Perfectly spread midpoints give the analytic minimum 1/(2n).
  samples = [1.0 / 8.0, 3.0 / 8.0, 5.0 / 8.0, 7.0 / 8.0]
  d = statistics.Discrepancy(samples, interval_multiplier)
  self.assertAlmostEqual(round(d, 2), 0.25)
  samples = [0.0, 1.0 / 3.0, 2.0 / 3.0, 1.0]
  d = statistics.Discrepancy(samples, interval_multiplier)
  self.assertAlmostEqual(round(d, 2), 0.5)
  # Normalization moves endpoint samples to the optimal positions.
  samples = statistics.NormalizeSamples(samples)[0]
  d = statistics.Discrepancy(samples, interval_multiplier)
  self.assertAlmostEqual(round(d, 2), 0.25)
  time_stamps_a = [0, 1, 2, 3, 5, 6]
  time_stamps_b = [0, 1, 2, 3, 5, 7]
  time_stamps_c = [0, 2, 3, 4]
  time_stamps_d = [0, 2, 3, 4, 5]
  d_abs_a = statistics.TimestampsDiscrepancy(time_stamps_a, True,
                                             interval_multiplier)
  d_abs_b = statistics.TimestampsDiscrepancy(time_stamps_b, True,
                                             interval_multiplier)
  d_abs_c = statistics.TimestampsDiscrepancy(time_stamps_c, True,
                                             interval_multiplier)
  d_abs_d = statistics.TimestampsDiscrepancy(time_stamps_d, True,
                                             interval_multiplier)
  d_rel_a = statistics.TimestampsDiscrepancy(time_stamps_a, False,
                                             interval_multiplier)
  d_rel_b = statistics.TimestampsDiscrepancy(time_stamps_b, False,
                                             interval_multiplier)
  d_rel_c = statistics.TimestampsDiscrepancy(time_stamps_c, False,
                                             interval_multiplier)
  d_rel_d = statistics.TimestampsDiscrepancy(time_stamps_d, False,
                                             interval_multiplier)
  self.assertTrue(d_abs_a < d_abs_b)
  self.assertTrue(d_rel_a < d_rel_b)
  self.assertTrue(d_rel_d < d_rel_c)
  self.assertEqual(round(d_abs_d, 2), round(d_abs_c, 2))
def AddResults(self, model, renderer_thread, interaction_record, results):
  """Computes scroll-latency and frame-time metrics for one record."""
  renderer_process = renderer_thread.parent
  time_bounds = bounds.Bounds()
  time_bounds.AddValue(interaction_record.start)
  time_bounds.AddValue(interaction_record.end)
  stats = rendering_stats.RenderingStats(
      renderer_process, model.browser_process, [time_bounds])

  def add_latency_results(base_name, latency_list):
    # Reports the arithmetic mean ('mean_<name>', ms) and the duration
    # discrepancy ('<name>_discrepancy', unitless) of a latency list.
    results.Add('mean_' + base_name, 'ms',
                round(statistics.ArithmeticMean(latency_list), 3))
    results.Add(base_name + '_discrepancy', '',
                round(statistics.DurationsDiscrepancy(latency_list), 4))

  if stats.mouse_wheel_scroll_latency:
    add_latency_results('mouse_wheel_scroll_latency',
                        stats.mouse_wheel_scroll_latency)
  if stats.touch_scroll_latency:
    add_latency_results('touch_scroll_latency', stats.touch_scroll_latency)
  if stats.js_touch_scroll_latency:
    add_latency_results('js_touch_scroll_latency',
                        stats.js_touch_scroll_latency)

  # List of raw frame times.
  frame_times = FlattenList(stats.frame_times)
  results.Add('frame_times', 'ms', frame_times)
  # Arithmetic mean of frame times.
  results.Add('mean_frame_time', 'ms',
              round(statistics.ArithmeticMean(frame_times), 3))
  # Absolute discrepancy of frame time stamps.
  results.Add('jank', 'ms',
              round(statistics.TimestampsDiscrepancy(stats.frame_timestamps),
                    4))
  # Are we hitting 60 fps for 95 percent of all frames?
  # We use 19ms as a somewhat looser threshold, instead of 1000.0/60.0.
  percentile_95 = statistics.Percentile(frame_times, 95.0)
  results.Add('mostly_smooth', 'score', 1.0 if percentile_95 < 19.0 else 0.0)
def AddResults(self, model, renderer_thread, interaction_records, results):
  """Computes latency, frame-time and approximation metrics for the records."""
  self.VerifyNonOverlappedRecords(interaction_records)
  renderer_process = renderer_thread.parent
  stats = rendering_stats.RenderingStats(
      renderer_process, model.browser_process,
      [r.GetBounds() for r in interaction_records])

  latencies = FlattenList(stats.input_event_latency)
  if latencies:
    results.Add('mean_input_event_latency', 'ms',
                round(statistics.ArithmeticMean(latencies), 3))
    results.Add('input_event_latency_discrepancy', 'ms',
                round(statistics.DurationsDiscrepancy(latencies), 4))

  # List of raw frame times.
  frame_times = FlattenList(stats.frame_times)
  results.Add('frame_times', 'ms', frame_times)
  # Arithmetic mean of frame times.
  results.Add('mean_frame_time', 'ms',
              round(statistics.ArithmeticMean(frame_times), 3))
  # Absolute discrepancy of frame time stamps.
  results.Add('jank', 'ms',
              round(statistics.TimestampsDiscrepancy(stats.frame_timestamps),
                    4))
  # Are we hitting 60 fps for 95 percent of all frames?
  # We use 19ms as a somewhat looser threshold, instead of 1000.0/60.0.
  is_mostly_smooth = statistics.Percentile(frame_times, 95.0) < 19.0
  results.Add('mostly_smooth', 'score', 1.0 if is_mostly_smooth else 0.0)
  # Mean percentage of pixels approximated (missing tiles, low resolution
  # tiles, non-ideal resolution tiles)
  results.Add(
      'mean_pixels_approximated', 'percent',
      round(statistics.ArithmeticMean(
          FlattenList(stats.approximated_pixel_percentages)), 3))
def AddResults(self, model, renderer_thread, interaction_records, results):
  """Computes smoothness metrics and reports them as ScalarValues."""
  self.VerifyNonOverlappedRecords(interaction_records)
  renderer_process = renderer_thread.parent
  stats = rendering_stats.RenderingStats(
      renderer_process, model.browser_process,
      [r.GetBounds() for r in interaction_records])

  input_latencies = FlattenList(stats.input_event_latency)
  if input_latencies:
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'mean_input_event_latency', 'ms',
        round(statistics.ArithmeticMean(input_latencies), 3)))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'input_event_latency_discrepancy', 'ms',
        round(statistics.DurationsDiscrepancy(input_latencies), 4)))

  scroll_latencies = FlattenList(stats.scroll_update_latency)
  if scroll_latencies:
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'mean_scroll_update_latency', 'ms',
        round(statistics.ArithmeticMean(scroll_latencies), 3)))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'scroll_update_latency_discrepancy', 'ms',
        round(statistics.DurationsDiscrepancy(scroll_latencies), 4)))

  # Only the first gesture scroll update latency is reported.
  gesture_latencies = FlattenList(stats.gesture_scroll_update_latency)
  if gesture_latencies:
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'first_gesture_scroll_update_latency', 'ms',
        round(gesture_latencies[0], 4)))

  # List of queueing durations.
  queueing_durations = FlattenList(stats.frame_queueing_durations)
  if queueing_durations:
    results.AddValue(list_of_scalar_values.ListOfScalarValues(
        results.current_page, 'queueing_durations', 'ms', queueing_durations))

  # List of raw frame times.
  frame_times = FlattenList(stats.frame_times)
  results.AddValue(list_of_scalar_values.ListOfScalarValues(
      results.current_page, 'frame_times', 'ms', frame_times,
      description='List of raw frame times, helpful to understand the other '
                  'metrics.'))
  # Arithmetic mean of frame times.
  results.AddValue(scalar.ScalarValue(
      results.current_page, 'mean_frame_time', 'ms',
      round(statistics.ArithmeticMean(frame_times), 3),
      description='Arithmetic mean of frame times.'))
  # Absolute discrepancy of frame time stamps.
  results.AddValue(scalar.ScalarValue(
      results.current_page, 'jank', 'ms',
      round(statistics.TimestampsDiscrepancy(stats.frame_timestamps), 4),
      description='Absolute discrepancy of frame time stamps, where '
                  'discrepancy is a measure of irregularity. It quantifies '
                  'the worst jank. For a single pause, discrepancy '
                  'corresponds to the length of this pause in milliseconds. '
                  'Consecutive pauses increase the discrepancy. This metric '
                  'is important because even if the mean and 95th '
                  'percentile are good, one long pause in the middle of an '
                  'interaction is still bad.'))
  # Are we hitting 60 fps for 95 percent of all frames?
  # We use 19ms as a somewhat looser threshold, instead of 1000.0/60.0.
  results.AddValue(scalar.ScalarValue(
      results.current_page, 'mostly_smooth', 'score',
      1.0 if statistics.Percentile(frame_times, 95.0) < 19.0 else 0.0,
      description='Were 95 percent of the frames hitting 60 fps?'
                  'boolean value (1/0).'))
  # Mean percentage of pixels approximated (missing tiles, low resolution
  # tiles, non-ideal resolution tiles).
  results.AddValue(scalar.ScalarValue(
      results.current_page, 'mean_pixels_approximated', 'percent',
      round(statistics.ArithmeticMean(
          FlattenList(stats.approximated_pixel_percentages)), 3),
      description='Percentage of pixels that were approximated '
                  '(checkerboarding, low-resolution tiles, etc.).'))