Example No. 1
def print_aggregated_stats(stats_list):
  """Prints the aggregated stats of given |stats_list|."""
  # Skip incomplete stats (probably crashed during this run).  We collect
  # enough runs to make up for an occasional missed run.
  stat_list = [stats for stats in stats_list if stats.is_complete()]

  raw_stats = _build_raw_stats(stat_list)

  # Build a dict mapping each key to its (median, 90th percentile) pair.
  aggregated_stats = {
      key: statistics.compute_percentiles(value, (50, 90))
      for key, value in raw_stats.iteritems()
  }

  # If there is more than one complete run, print the VPERF= and VRAWPERF=
  # lines.
  if len(stat_list) > 1:
    # Print VPERF= lines.
    for name in _ALL_STAT_VARS:
      unit = 'ms' if name.endswith('_ms') else 'MB'
      median, p90 = aggregated_stats[name]
      print 'VPERF=%(name)s: %(median).2f%(unit)s 90%%=%(p90).2f' % {
          'name': name,
          'unit': unit,
          'median': median,
          'p90': p90,
      }

    # Print VRAWPERF= line.
    print 'VRAWPERF=%s' % dict(raw_stats)

  # Note: each value below is the median of its own data set, so the
  # component medians are not guaranteed to add up to the total.
  print ('\nPERF=boot:%dms (preEmbed:%dms + pluginLoad:%dms + onResume:%dms),'
         '\n     virt:%.1fMB, res:%.1fMB, pdirt:%.1fMB, runs:%d\n' % (
             aggregated_stats['boot_time_ms'][0],
             aggregated_stats['pre_embed_time_ms'][0],
             aggregated_stats['plugin_load_time_ms'][0],
             aggregated_stats['on_resume_time_ms'][0],
             aggregated_stats['app_virt_mem'][0],
             aggregated_stats['app_res_mem'][0],
             aggregated_stats['app_pdirt_mem'][0],
             len(stat_list)))
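For reference, with illustrative numbers (a median of 995.0 ms and a 90th percentile of 1010.0 ms for boot_time_ms; not taken from a real run), the VPERF= format string above renders as:

VPERF=boot_time_ms: 995.00ms 90%=1010.00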
Example No. 2
def print_aggregated_stats(stats_list):
    """Prints the aggregated stats of given |stats_list|."""
    # Skip incomplete stats (probably crashed during this run).  We collect
    # enough runs to make up for an occasional missed run.
    stat_list = [stats for stats in stats_list if stats.is_complete()]

    raw_stats = _build_raw_stats(stat_list)

    # Build a dict mapping each key to its (median, 90th percentile) pair.
    aggregated_stats = {
        key: statistics.compute_percentiles(value, (50, 90))
        for key, value in raw_stats.iteritems()
    }

    # If there is more than one complete run, print the VPERF= and VRAWPERF=
    # lines.
    if len(stat_list) > 1:
        # Print VPERF= lines.
        for name in _ALL_STAT_VARS:
            unit = 'ms' if name.endswith('_ms') else 'MB'
            median, p90 = aggregated_stats[name]
            print 'VPERF=%(name)s: %(median).2f%(unit)s 90%%=%(p90).2f' % {
                'name': name,
                'unit': unit,
                'median': median,
                'p90': p90,
            }

        # Print VRAWPERF= line.
        print 'VRAWPERF=%s' % dict(raw_stats)

    # Note: each value below is the median of its own data set, so the
    # component medians are not guaranteed to add up to the total.
    print(
        '\nPERF=boot:%dms (preEmbed:%dms + pluginLoad:%dms + onResume:%dms),'
        '\n     virt:%.1fMB, res:%.1fMB, pdirt:%.1fMB, runs:%d\n' %
        (aggregated_stats['boot_time_ms'][0],
         aggregated_stats['pre_embed_time_ms'][0],
         aggregated_stats['plugin_load_time_ms'][0],
         aggregated_stats['on_resume_time_ms'][0],
         aggregated_stats['app_virt_mem'][0],
         aggregated_stats['app_res_mem'][0],
         aggregated_stats['app_pdirt_mem'][0], len(stat_list)))
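Both versions of print_aggregated_stats rely on two helpers that are not shown here, _ALL_STAT_VARS and _build_raw_stats. A minimal sketch of what they could look like (the stat names are taken from the PERF= line above; the attribute access on each stats object is an assumption, not the project's actual definition):

_ALL_STAT_VARS = ('boot_time_ms', 'pre_embed_time_ms', 'plugin_load_time_ms',
                  'on_resume_time_ms', 'app_virt_mem', 'app_res_mem',
                  'app_pdirt_mem')


def _build_raw_stats(stat_list):
  # Collect one value per completed run for each stat name, so that
  # compute_percentiles() can aggregate across runs.
  return dict((name, [getattr(stats, name) for stats in stat_list])
              for name in _ALL_STAT_VARS)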
Example No. 3
def bootstrap_estimation(
    ctrl_sample, expt_sample, statistic, confidence_level):
  """Estimates confidence interval of difference of a statistic by Bootstrap.

  Args:
    ctrl_sample: A control sample as a list of numbers.
    expt_sample: An experiment sample as a list of numbers.
    statistic: A function that computes a statistic from a sample.
    confidence_level: An integer that specifies the requested confidence
        level as a percentage, e.g. 90, 95, 99.

  Returns:
    Estimated range as a number tuple.
  """
  bootstrap_distribution = []
  for _ in xrange(1000):
    bootstrap_distribution.append(
        statistic(bootstrap_sample(expt_sample)) -
        statistic(bootstrap_sample(ctrl_sample)))
  return statistics.compute_percentiles(
      bootstrap_distribution, (100 - confidence_level, confidence_level))
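The helper bootstrap_sample is not shown in this example. A minimal sketch of the standard resampling step it presumably performs, namely drawing len(sample) values with replacement:

import random


def bootstrap_sample(sample):
  # Draw a sample of the same size as |sample|, with replacement.
  return [random.choice(sample) for _ in xrange(len(sample))]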
Example No. 4
def bootstrap_estimation(ctrl_sample, expt_sample, statistic,
                         confidence_level):
    """Estimates confidence interval of difference of a statistic by Bootstrap.

    Args:
      ctrl_sample: A control sample as a list of numbers.
      expt_sample: An experiment sample as a list of numbers.
      statistic: A function that computes a statistic from a sample.
      confidence_level: An integer that specifies the requested confidence
          level as a percentage, e.g. 90, 95, 99.

    Returns:
      Estimated range as a number tuple.
    """
    bootstrap_distribution = []
    for _ in xrange(1000):
        bootstrap_distribution.append(
            statistic(bootstrap_sample(expt_sample)) -
            statistic(bootstrap_sample(ctrl_sample)))
    return statistics.compute_percentiles(
        bootstrap_distribution, (100 - confidence_level, confidence_level))
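A hypothetical call, with made-up samples, using compute_median from the same statistics module (it is exercised in the tests below) as the statistic:

low, high = bootstrap_estimation(
    ctrl_sample=[980, 1010, 1005, 995],
    expt_sample=[940, 965, 950, 958],
    statistic=statistics.compute_median,
    confidence_level=90)
print 'Estimated range for the difference of medians: [%.1f, %.1f]' % (
    low, high)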
Example No. 5
 def test_compute_percentiles(self):
   self.assertTrue(all(map(math.isnan, statistics.compute_percentiles([]))))
   self.assertEquals(statistics.compute_median([5, 8]),
                     statistics.compute_percentiles([5, 8], [50])[0])
   self.assertEquals(statistics.compute_median([2, 3, 7]),
                     statistics.compute_percentiles([2, 3, 7], [50])[0])
   # All expected values below agree with Google Docs.
   self.assertEquals((42, 42), statistics.compute_percentiles([42]))
   self.assertEquals((6.5, 7.7), statistics.compute_percentiles([5, 8]))
   self.assertEquals((3, 6.2), statistics.compute_percentiles([2, 3, 7]))
   self.assertEquals((5, 8.4), statistics.compute_percentiles([2, 3, 7, 9]))
   self.assertEquals((7, 63.6),
                     statistics.compute_percentiles([2, 3, 7, 9, 100]))
   self.assertEquals((39.5, 43.4),
                     statistics.compute_percentiles([6, 7, 15, 36, 39, 40,
                                                     41, 42, 43, 47]))
   self.assertEquals((40.0, 47.0),
                     statistics.compute_percentiles([6, 7, 15, 36, 39, 40,
                                                     41, 42, 43, 47, 49]))
   self.assertEquals((37.5, 40.5),
                     statistics.compute_percentiles([7, 15, 36, 39, 40, 41]))
   self.assertEquals((39, 42.2),
                     statistics.compute_percentiles([6, 7, 15, 36, 39, 40,
                                                     41, 42, 43]))
Example No. 6
 def test_compute_percentiles(self):
     self.assertTrue(
         all(map(math.isnan, statistics.compute_percentiles([]))))
     self.assertEquals(statistics.compute_median([5, 8]),
                       statistics.compute_percentiles([5, 8], [50])[0])
     self.assertEquals(statistics.compute_median([2, 3, 7]),
                       statistics.compute_percentiles([2, 3, 7], [50])[0])
     # All expected values below agree with Google Docs.
     self.assertEquals((42, 42), statistics.compute_percentiles([42]))
     self.assertEquals((6.5, 7.7), statistics.compute_percentiles([5, 8]))
     self.assertEquals((3, 6.2), statistics.compute_percentiles([2, 3, 7]))
     self.assertEquals((5, 8.4),
                       statistics.compute_percentiles([2, 3, 7, 9]))
     self.assertEquals((7, 63.6),
                       statistics.compute_percentiles([2, 3, 7, 9, 100]))
     self.assertEquals((39.5, 43.4),
                       statistics.compute_percentiles(
                           [6, 7, 15, 36, 39, 40, 41, 42, 43, 47]))
     self.assertEquals((40.0, 47.0),
                       statistics.compute_percentiles(
                           [6, 7, 15, 36, 39, 40, 41, 42, 43, 47, 49]))
     self.assertEquals(
         (37.5, 40.5),
         statistics.compute_percentiles([7, 15, 36, 39, 40, 41]))
     self.assertEquals(
         (39, 42.2),
         statistics.compute_percentiles([6, 7, 15, 36, 39, 40, 41, 42, 43]))
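The expected values in these tests are consistent with linear interpolation between closest ranks (the method Google Docs' PERCENTILE uses). A minimal sketch of compute_percentiles along those lines, shown only to make the expected values reproducible; it is not necessarily the project's actual implementation:

import math


def compute_percentiles(sample, percentiles=(50, 90)):
  # Return one value per requested percentile; NaN for an empty sample.
  if not sample:
    return tuple(float('nan') for _ in percentiles)
  ordered = sorted(sample)
  results = []
  for percentile in percentiles:
    # Fractional rank into the sorted sample, then interpolate linearly
    # between the two neighboring values.
    rank = (len(ordered) - 1) * percentile / 100.0
    lower, upper = int(math.floor(rank)), int(math.ceil(rank))
    weight = rank - lower
    results.append(ordered[lower] * (1 - weight) + ordered[upper] * weight)
  return tuple(results)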