コード例 #1
0
 def testArithmeticMean(self):
     """ArithmeticMean returns the simple average; an empty list yields 0."""
     # The ArithmeticMean function computes the simple average.
     # Note: assertAlmostEquals/assertEquals are deprecated aliases (removed
     # in Python 3.12); use assertAlmostEqual/assertEqual instead.
     self.assertAlmostEqual(40 / 3.0,
                            statistics.ArithmeticMean([10, 10, 20]))
     self.assertAlmostEqual(15.0, statistics.ArithmeticMean([10, 20]))
     # If the 'count' is zero, then zero is returned.
     self.assertEqual(0, statistics.ArithmeticMean([]))
コード例 #2
0
ファイル: smoothness.py プロジェクト: hmin/chromium-crosswalk
    def AddResults(self, model, renderer_thread, interaction_record, results):
        """Adds scroll-latency, frame-time, jank and smoothness results for
        the time range covered by `interaction_record`."""
        renderer_process = renderer_thread.parent
        time_bounds = bounds.Bounds()
        time_bounds.AddValue(interaction_record.start)
        time_bounds.AddValue(interaction_record.end)
        stats = rendering_stats.RenderingStats(renderer_process,
                                               model.browser_process,
                                               [time_bounds])

        # For each latency category with data, report its mean and its
        # duration discrepancy.
        for latency_name in ('mouse_wheel_scroll_latency',
                             'touch_scroll_latency',
                             'js_touch_scroll_latency'):
            latency_values = getattr(stats, latency_name)
            if not latency_values:
                continue
            mean_value = statistics.ArithmeticMean(latency_values)
            discrepancy = statistics.DurationsDiscrepancy(latency_values)
            results.Add('mean_' + latency_name, 'ms', round(mean_value, 3))
            results.Add(latency_name + '_discrepancy', '',
                        round(discrepancy, 4))

        # Raw frame times, then their arithmetic mean.
        frame_times = FlattenList(stats.frame_times)
        results.Add('frame_times', 'ms', frame_times)
        results.Add('mean_frame_time', 'ms',
                    round(statistics.ArithmeticMean(frame_times), 3))

        # Jank: absolute discrepancy of frame time stamps.
        frame_discrepancy = statistics.TimestampsDiscrepancy(
            stats.frame_timestamps)
        results.Add('jank', 'ms', round(frame_discrepancy, 4))

        # mostly_smooth: did 95 percent of frames hit 60 fps?  19 ms is a
        # somewhat looser threshold than the exact 1000.0 / 60.0.
        percentile_95 = statistics.Percentile(frame_times, 95.0)
        results.Add('mostly_smooth', 'score',
                    1.0 if percentile_95 < 19.0 else 0.0)
コード例 #3
0
    def _ComputeMeanPixelsCheckerboarded(self, page, stats):
        """Returns the mean percentage of pixels checkerboarded as a value.

        Only missing tiles count as checkerboarded; tiles of low or
        non-ideal resolution are not taken into consideration.
        """
        mean_value = None
        reason = None
        if not self._HasEnoughFrames(stats.frame_timestamps):
            reason = NOT_ENOUGH_FRAMES_MESSAGE
        elif rendering_stats.CHECKERBOARDED_PIXEL_ERROR in stats.errors:
            # The stats carry a recorded error for this metric; propagate it
            # as the none_value_reason.
            reason = stats.errors[rendering_stats.CHECKERBOARDED_PIXEL_ERROR]
        else:
            percentages = perf_tests_helper.FlattenList(
                stats.checkerboarded_pixel_percentages)
            mean_value = round(statistics.ArithmeticMean(percentages), 3)
        return scalar.ScalarValue(
            page,
            'mean_pixels_checkerboarded',
            'percent',
            mean_value,
            description='Percentage of pixels that were checkerboarded.',
            none_value_reason=reason)
コード例 #4
0
 def _ComputeLatencyMetric(self, page, stats, name, list_of_latency_lists):
     """Returns Values for the mean and discrepancy for given latency stats.

     Args:
       page: The page these values are associated with.
       stats: Rendering stats; only frame_timestamps is consulted here.
       name: Base name used for the emitted values ('mean_<name>' and
         '<name>_discrepancy').
       list_of_latency_lists: Nested lists of latency samples, flattened
         before computing the statistics.

     Returns:
       A 2-tuple of ScalarValues, or an empty tuple when there are enough
       frames but no latency samples at all.
     """
     mean_latency = None
     latency_discrepancy = None
     none_value_reason = None
     if self._HasEnoughFrames(stats.frame_timestamps):
         latency_list = perf_tests_helper.FlattenList(list_of_latency_lists)
         if not latency_list:
             # No samples: emit nothing rather than None-valued metrics.
             return ()
         mean_latency = round(statistics.ArithmeticMean(latency_list), 3)
         latency_discrepancy = round(
             statistics.DurationsDiscrepancy(latency_list), 4)
     else:
         none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
     return (scalar.ScalarValue(
         page,
         'mean_%s' % name,
         'ms',
         mean_latency,
         description='Arithmetic mean of the raw %s values' % name,
         none_value_reason=none_value_reason),
             scalar.ScalarValue(
                 page,
                 '%s_discrepancy' % name,
                 'ms',
                 latency_discrepancy,
                 description='Discrepancy of the raw %s values' % name,
                 none_value_reason=none_value_reason))
コード例 #5
0
    def AddResults(self, model, renderer_thread, interaction_records, results):
        """Adds input latency, frame-time, jank, smoothness and pixel
        approximation results for the given interaction records."""
        self.VerifyNonOverlappedRecords(interaction_records)
        stats = rendering_stats.RenderingStats(
            renderer_thread.parent, model.browser_process,
            [record.GetBounds() for record in interaction_records])

        # Input event latency: mean and duration discrepancy, when present.
        input_event_latency = FlattenList(stats.input_event_latency)
        if input_event_latency:
            mean_latency = statistics.ArithmeticMean(input_event_latency)
            latency_discrepancy = statistics.DurationsDiscrepancy(
                input_event_latency)
            results.Add('mean_input_event_latency', 'ms',
                        round(mean_latency, 3))
            results.Add('input_event_latency_discrepancy', 'ms',
                        round(latency_discrepancy, 4))

        # Raw frame times, then their arithmetic mean.
        frame_times = FlattenList(stats.frame_times)
        results.Add('frame_times', 'ms', frame_times)
        results.Add('mean_frame_time', 'ms',
                    round(statistics.ArithmeticMean(frame_times), 3))

        # Jank: absolute discrepancy of frame time stamps.
        frame_discrepancy = statistics.TimestampsDiscrepancy(
            stats.frame_timestamps)
        results.Add('jank', 'ms', round(frame_discrepancy, 4))

        # mostly_smooth: did 95 percent of frames hit 60 fps?  19 ms is a
        # somewhat looser threshold than the exact 1000.0 / 60.0.
        percentile_95 = statistics.Percentile(frame_times, 95.0)
        results.Add('mostly_smooth', 'score',
                    1.0 if percentile_95 < 19.0 else 0.0)

        # Mean percentage of pixels approximated (missing tiles, low
        # resolution tiles, non-ideal resolution tiles).
        approximated = FlattenList(stats.approximated_pixel_percentages)
        results.Add('mean_pixels_approximated', 'percent',
                    round(statistics.ArithmeticMean(approximated), 3))
コード例 #6
0
    def _ComputeFrameTimeMetric(self, prefix, page, frame_timestamps,
                                frame_times):
        """Returns Values for the frame time metrics.

        Covers the raw frame times, their arithmetic mean, and the
        percentage of frames that were hitting 60 fps.
        """
        flat_frame_times = None
        mean_frame_time = None
        percentage_smooth = None
        none_value_reason = None
        if not self._HasEnoughFrames(frame_timestamps):
            none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
        else:
            flat_frame_times = perf_tests_helper.FlattenList(frame_times)
            mean_frame_time = round(
                statistics.ArithmeticMean(flat_frame_times), 3)
            # We use 17ms as a somewhat looser threshold, instead of
            # 1000.0/60.0.
            smooth_frames = [t for t in flat_frame_times if t < 17.0]
            percentage_smooth = float(len(smooth_frames)) / len(
                flat_frame_times) * 100.0
        return (list_of_scalar_values.ListOfScalarValues(
            page,
            '%sframe_times' % prefix,
            'ms',
            flat_frame_times,
            description='List of raw frame times, helpful to understand the '
            'other metrics.',
            none_value_reason=none_value_reason,
            improvement_direction=improvement_direction.DOWN),
                scalar.ScalarValue(
                    page,
                    '%smean_frame_time' % prefix,
                    'ms',
                    mean_frame_time,
                    description='Arithmetic mean of frame times.',
                    none_value_reason=none_value_reason,
                    improvement_direction=improvement_direction.DOWN),
                scalar.ScalarValue(
                    page,
                    '%spercentage_smooth' % prefix,
                    'score',
                    percentage_smooth,
                    description='Percentage of frames that were hitting '
                    '60 fps.',
                    none_value_reason=none_value_reason,
                    improvement_direction=improvement_direction.UP))
コード例 #7
0
def _CollectData(output_path, is_collecting):
    """Collects power samples from a Monsoon power monitor until signalled.

    Writes (time, watts) rows to `output_path` as CSV and prints summary
    statistics plus a gnuplot command to stdout.

    Args:
      output_path: Path of the CSV file to write the plot data to.
      is_collecting: An Event-like object. It is set here once sampling has
        actually started, and collection continues until the caller clears it.
    """
    mon = monsoon.Monsoon(wait=False)
    # Note: Telemetry requires the device to be connected by USB, but that
    # puts it in charging mode. This increases the power consumption.
    mon.SetUsbPassthrough(1)
    # Nominal Li-ion voltage is 3.7V, but it puts out 4.2V at max capacity. Use
    # 4.0V to simulate a "~80%" charged battery. Google "li-ion voltage curve".
    # This is true only for a single cell. (Most smartphones, some tablets.)
    mon.SetVoltage(4.0)

    samples = []
    try:
        mon.StartDataCollection()
        # Do one CollectData() to make the Monsoon set up, which takes about
        # 0.3 seconds, and only signal that we've started after that.
        mon.CollectData()
        is_collecting.set()
        while is_collecting.is_set():
            samples += mon.CollectData()
    finally:
        # Always stop the hardware, even if collection raised.
        mon.StopDataCollection()

    # Add x-axis labels.
    # NOTE(review): the 1/5000 divisor implies a 5 kHz sample rate — confirm
    # against the Monsoon configuration.
    plot_data = [(i / 5000., sample.amps * sample.volts)
                 for i, sample in enumerate(samples)]

    # Print data in csv.
    with open(output_path, 'w') as output_file:
        output_writer = csv.writer(output_file)
        output_writer.writerows(plot_data)
        output_file.flush()

    # Instantaneous power (watts) per sample.
    power_samples = [s.amps * s.volts for s in samples]

    print 'Monsoon profile power readings in watts:'
    print '  Total    = %f' % statistics.TrapezoidalRule(
        power_samples, 1 / 5000.)
    print('  Average  = %f' % statistics.ArithmeticMean(power_samples) +
          '+-%f' % statistics.StandardDeviation(power_samples))
    print '  Peak     = %f' % max(power_samples)
    print '  Duration = %f' % (len(power_samples) / 5000.)

    print 'To view the Monsoon profile, run:'
    print(
        '  echo "set datafile separator \',\'; plot \'%s\' with lines" | '
        'gnuplot --persist' % output_path)
コード例 #8
0
  def _ComputeMeanPixelsApproximated(self, page, stats):
    """Returns the mean percentage of pixels approximated as a ScalarValue.

    Approximated pixels come from tiles which are missing or of low or
    non-ideal resolution.
    """
    mean_value = None
    reason = None
    if not self._HasEnoughFrames(stats.frame_timestamps):
      reason = NOT_ENOUGH_FRAMES_MESSAGE
    else:
      percentages = perf_tests_helper.FlattenList(
          stats.approximated_pixel_percentages)
      mean_value = round(statistics.ArithmeticMean(percentages), 3)
    return scalar.ScalarValue(
        page, 'mean_pixels_approximated', 'percent', mean_value,
        description='Percentage of pixels that were approximated '
                    '(checkerboarding, low-resolution tiles, etc.).',
        none_value_reason=reason,
        improvement_direction=improvement_direction.DOWN)
コード例 #9
0
    def GetTimeStats(self):
        """Calculate time stamp stats for all remote stream events.

        Returns:
          A TimeStats instance, or the initial empty dict when no remote
          stream with more than one event exists. If any stream's cadence
          cannot be determined, returns immediately with invalid_data=True.
          NOTE(review): `stats` is reassigned on every qualifying stream, so
          only the last remote stream processed contributes to the returned
          value — confirm whether more than one remote stream is expected.
        """
        stats = {}
        for stream, relevant_events in self.stream_to_events.iteritems():
            # A single event is not enough to compute timing statistics.
            if len(relevant_events) == 1:
                logging.debug('Found a stream=%s with just one event', stream)
                continue
            # Only remote streams are analyzed; local ones are skipped.
            if not self._IsRemoteStream(stream):
                logging.info('Skipping processing of local stream: %s', stream)
                continue

            cadence = self._GetCadence(relevant_events)
            if not cadence:
                # Without a cadence the data is unusable; bail out early.
                stats = TimeStats()
                stats.invalid_data = True
                return stats

            frame_distribution = self._GetSourceToOutputDistribution(cadence)
            fps = self._GetFpsFromCadence(frame_distribution)

            drift_time_stats = self._GetDrifTimeStats(relevant_events, cadence)
            (drift_time, rendering_length_error) = drift_time_stats

            # Drift time normalization: absolute deviation from the mean.
            mean_drift_time = statistics.ArithmeticMean(drift_time)
            norm_drift_time = [abs(x - mean_drift_time) for x in drift_time]

            smoothness_stats = self._GetSmoothnessStats(norm_drift_time)
            (percent_badly_oos, percent_out_of_sync,
             smoothness_score) = smoothness_stats

            freezing_score = self._GetFreezingScore(frame_distribution)

            stats = TimeStats(drift_time=drift_time,
                              percent_badly_out_of_sync=percent_badly_oos,
                              percent_out_of_sync=percent_out_of_sync,
                              smoothness_score=smoothness_score,
                              freezing_score=freezing_score,
                              rendering_length_error=rendering_length_error,
                              fps=fps,
                              frame_distribution=frame_distribution)
        return stats
コード例 #10
0
    def _ComputeFrameTimeMetric(self, page, stats):
        """Returns Values for the frame time metrics.

        This includes the raw and mean frame times, as well as the
        mostly_smooth metric which tracks whether we hit 60 fps for 95% of
        the frames.
        """
        frame_times = None
        mean_frame_time = None
        mostly_smooth = None
        none_value_reason = None
        if self._HasEnoughFrames(stats.frame_timestamps):
            frame_times = FlattenList(stats.frame_times)
            mean_frame_time = round(statistics.ArithmeticMean(frame_times), 3)
            # We use 19ms as a somewhat looser threshold, instead of 1000.0/60.0.
            percentile_95 = statistics.Percentile(frame_times, 95.0)
            mostly_smooth = 1.0 if percentile_95 < 19.0 else 0.0
        else:
            none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
        return (list_of_scalar_values.ListOfScalarValues(
            page,
            'frame_times',
            'ms',
            frame_times,
            description='List of raw frame times, helpful to understand the '
            'other metrics.',
            none_value_reason=none_value_reason),
                scalar.ScalarValue(
                    page,
                    'mean_frame_time',
                    'ms',
                    mean_frame_time,
                    description='Arithmetic mean of frame times.',
                    none_value_reason=none_value_reason),
                scalar.ScalarValue(
                    page,
                    'mostly_smooth',
                    'score',
                    mostly_smooth,
                    # Fixed: implicit concatenation previously produced
                    # "fps?boolean" with no separating space.
                    description='Were 95 percent of the frames hitting 60 fps? '
                    'boolean value (1/0).',
                    none_value_reason=none_value_reason))
コード例 #11
0
    def StopMonitoringPower(self):
        """Stops IPPET power monitoring and returns the parsed measurements.

        Signals the IPPET process to quit via a named event, waits up to 20s
        for it to exit (force-killing it if still running), then parses its
        tab-separated log file into a dict of power and energy statistics.

        Returns:
          A dict with 'identifier', whole-package power samples and energy
          consumption, per-component (cpu/disk/gpu) utilization, and — when
          Chrome processes appear in the log — an estimate of Chrome's
          energy consumption.

        Raises:
          IppetError: If IPPET times out, exits with a nonzero code, or must
            be forcefully terminated.
        """
        assert self._ippet_handle, (
            'Called StopMonitoringPower() before StartMonitoringPower().')
        try:
            # Stop IPPET.
            with contextlib.closing(
                    win32event.OpenEvent(win32event.EVENT_MODIFY_STATE, False,
                                         QUIT_EVENT)) as quit_event:
                win32event.SetEvent(quit_event)

            # Wait for IPPET process to exit.
            wait_code = win32event.WaitForSingleObject(self._ippet_handle,
                                                       20000)
            if wait_code != win32event.WAIT_OBJECT_0:
                if wait_code == win32event.WAIT_TIMEOUT:
                    raise IppetError('Timed out waiting for IPPET to close.')
                else:
                    raise IppetError(
                        'Error code %d while waiting for IPPET to close.' %
                        wait_code)

        finally:  # If we need to, forcefully kill IPPET.
            try:
                exit_code = win32process.GetExitCodeProcess(self._ippet_handle)
                if exit_code == win32con.STILL_ACTIVE:
                    win32process.TerminateProcess(self._ippet_handle, 0)
                    raise IppetError(
                        'IPPET is still running but should have stopped.')
                elif exit_code != 0:
                    raise IppetError('IPPET closed with exit code %d.' %
                                     exit_code)
            finally:
                # Always release the process handle, even on error paths.
                self._ippet_handle.Close()
                self._ippet_handle = None

        # Read IPPET's log file.
        # The log is tab-separated text despite the .xls extension.
        log_file = os.path.join(self._output_dir, 'ippet_log_processes.xls')
        try:
            with open(log_file, 'r') as f:
                reader = csv.DictReader(f, dialect='excel-tab')
                data = list(reader)[
                    1:]  # The first iteration only reports temperature.
        except IOError:
            logging.error('Output directory %s contains: %s', self._output_dir,
                          os.listdir(self._output_dir))
            raise
        shutil.rmtree(self._output_dir)
        self._output_dir = None

        def get(*args, **kwargs):
            """Pull all iterations of a field from the IPPET data as a list.

            Args:
              args: A list representing the field name.
              mult: A constant to multiply the field's value by, for unit
                conversions.
              default: The default value if the field is not found in the
                iteration.

            Returns:
              A list containing the field's value across all iterations.
            """
            # IPPET column names look like \\.\part1\part2.
            key = '\\\\.\\' + '\\'.join(args)

            def value(line):
                # Prefer the recorded value; fall back to the caller-supplied
                # default, if any.
                if key in line:
                    return line[key]
                elif 'default' in kwargs:
                    return kwargs['default']
                else:
                    raise KeyError('Key "%s" not found in data and '
                                   'no default was given.' % key)

            return [
                float(value(line)) * kwargs.get('mult', 1) for line in data
            ]

        # mult=1000 converts W -> mW; 1/3600 converts seconds -> hours, so
        # power * interval sums to energy in mWh.
        result = {
            'identifier':
            'ippet',
            'power_samples_mw':
            get('Power(_Total)', 'Package W', mult=1000),
            'energy_consumption_mwh':
            sum(
                map(operator.mul, get('Power(_Total)', 'Package W', mult=1000),
                    get('sys', 'Interval(secs)', mult=1. / 3600.))),
            'component_utilization': {
                'whole_package': {
                    'average_temperature_c':
                    statistics.ArithmeticMean(
                        get('Temperature(Package)', 'Current C')),
                },
                'cpu': {
                    'power_samples_mw':
                    get('Power(_Total)', 'CPU W', mult=1000),
                    'energy_consumption_mwh':
                    sum(
                        map(operator.mul,
                            get('Power(_Total)', 'CPU W', mult=1000),
                            get('sys', 'Interval(secs)', mult=1. / 3600.))),
                },
                'disk': {
                    'power_samples_mw':
                    get('Power(_Total)', 'Disk W', mult=1000),
                    'energy_consumption_mwh':
                    sum(
                        map(operator.mul,
                            get('Power(_Total)', 'Disk W', mult=1000),
                            get('sys', 'Interval(secs)', mult=1. / 3600.))),
                },
                'gpu': {
                    'power_samples_mw':
                    get('Power(_Total)', 'GPU W', mult=1000),
                    'energy_consumption_mwh':
                    sum(
                        map(operator.mul,
                            get('Power(_Total)', 'GPU W', mult=1000),
                            get('sys', 'Interval(secs)', mult=1. / 3600.))),
                },
            },
        }

        # Find Chrome processes in data. Note that this won't work if there are
        # extra Chrome processes lying around.
        chrome_keys = set()
        for iteration in data:
            for key in iteration.iterkeys():
                parts = key.split('\\')
                if (len(parts) >= 4 and re.match(
                        r'Process\(Google Chrome [0-9]+\)', parts[3])):
                    chrome_keys.add(parts[3])
        # Add Chrome process power usage to result.
        # Note that this is only an estimate of Chrome's CPU power usage.
        if chrome_keys:
            per_process_power_usage = [
                get(key, 'CPU Power W', default=0, mult=1000)
                for key in chrome_keys
            ]
            result['application_energy_consumption_mwh'] = (sum(
                map(operator.mul, map(sum, zip(*per_process_power_usage)),
                    get('sys', 'Interval(secs)', mult=1. / 3600.))))

        return result
コード例 #12
0
    def AddResults(self, model, renderer_thread, interaction_records, results):
        """Computes smoothness values over the interaction records and adds
        them to `results`.

        Emits input/scroll latency means and discrepancies, queueing
        durations, raw and mean frame times, jank, mostly_smooth, and the
        mean percentage of approximated pixels.
        """
        self.VerifyNonOverlappedRecords(interaction_records)
        renderer_process = renderer_thread.parent
        stats = rendering_stats.RenderingStats(
            renderer_process, model.browser_process,
            [r.GetBounds() for r in interaction_records])

        # Input event latency: mean and duration discrepancy, when present.
        input_event_latency = FlattenList(stats.input_event_latency)
        if input_event_latency:
            mean_input_event_latency = statistics.ArithmeticMean(
                input_event_latency)
            input_event_latency_discrepancy = statistics.DurationsDiscrepancy(
                input_event_latency)
            results.AddValue(
                scalar.ScalarValue(results.current_page,
                                   'mean_input_event_latency', 'ms',
                                   round(mean_input_event_latency, 3)))
            results.AddValue(
                scalar.ScalarValue(results.current_page,
                                   'input_event_latency_discrepancy', 'ms',
                                   round(input_event_latency_discrepancy, 4)))
        # Scroll update latency: mean and duration discrepancy, when present.
        scroll_update_latency = FlattenList(stats.scroll_update_latency)
        if scroll_update_latency:
            mean_scroll_update_latency = statistics.ArithmeticMean(
                scroll_update_latency)
            scroll_update_latency_discrepancy = statistics.DurationsDiscrepancy(
                scroll_update_latency)
            results.AddValue(
                scalar.ScalarValue(results.current_page,
                                   'mean_scroll_update_latency', 'ms',
                                   round(mean_scroll_update_latency, 3)))
            results.AddValue(
                scalar.ScalarValue(results.current_page,
                                   'scroll_update_latency_discrepancy', 'ms',
                                   round(scroll_update_latency_discrepancy,
                                         4)))
        # Only the first gesture scroll update latency is reported.
        gesture_scroll_update_latency = FlattenList(
            stats.gesture_scroll_update_latency)
        if gesture_scroll_update_latency:
            results.AddValue(
                scalar.ScalarValue(results.current_page,
                                   'first_gesture_scroll_update_latency', 'ms',
                                   round(gesture_scroll_update_latency[0], 4)))

        # List of queueing durations.
        frame_queueing_durations = FlattenList(stats.frame_queueing_durations)
        if frame_queueing_durations:
            results.AddValue(
                list_of_scalar_values.ListOfScalarValues(
                    results.current_page, 'queueing_durations', 'ms',
                    frame_queueing_durations))

        # List of raw frame times.
        frame_times = FlattenList(stats.frame_times)
        results.AddValue(
            list_of_scalar_values.ListOfScalarValues(
                results.current_page,
                'frame_times',
                'ms',
                frame_times,
                description=
                'List of raw frame times, helpful to understand the other '
                'metrics.'))

        # Arithmetic mean of frame times.
        mean_frame_time = statistics.ArithmeticMean(frame_times)
        results.AddValue(
            scalar.ScalarValue(results.current_page,
                               'mean_frame_time',
                               'ms',
                               round(mean_frame_time, 3),
                               description='Arithmetic mean of frame times.'))

        # Absolute discrepancy of frame time stamps.
        frame_discrepancy = statistics.TimestampsDiscrepancy(
            stats.frame_timestamps)
        results.AddValue(
            scalar.ScalarValue(
                results.current_page,
                'jank',
                'ms',
                round(frame_discrepancy, 4),
                description='Absolute discrepancy of frame time stamps, where '
                'discrepancy is a measure of irregularity. It quantifies '
                'the worst jank. For a single pause, discrepancy '
                'corresponds to the length of this pause in milliseconds. '
                'Consecutive pauses increase the discrepancy. This metric '
                'is important because even if the mean and 95th '
                'percentile are good, one long pause in the middle of an '
                'interaction is still bad.'))

        # Are we hitting 60 fps for 95 percent of all frames?
        # We use 19ms as a somewhat looser threshold, instead of 1000.0/60.0.
        percentile_95 = statistics.Percentile(frame_times, 95.0)
        results.AddValue(
            scalar.ScalarValue(
                results.current_page,
                'mostly_smooth',
                'score',
                1.0 if percentile_95 < 19.0 else 0.0,
                description='Were 95 percent of the frames hitting 60 fps?'
                'boolean value (1/0).'))

        # Mean percentage of pixels approximated (missing tiles, low resolution
        # tiles, non-ideal resolution tiles).
        results.AddValue(
            scalar.ScalarValue(
                results.current_page,
                'mean_pixels_approximated',
                'percent',
                round(
                    statistics.ArithmeticMean(
                        FlattenList(stats.approximated_pixel_percentages)), 3),
                description='Percentage of pixels that were approximated '
                '(checkerboarding, low-resolution tiles, etc.).'))