Example #1
 def test_computes_inter_quartile_range(self):
     self.assertEqual(self.samples.iqr, 0)
     self.samples.add(Sample(2, 1, 1025))
     self.samples.add(Sample(3, 1, 1050))
     self.samples.add(Sample(4, 1, 1075))
     self.samples.add(Sample(5, 1, 1100))
     self.assertEqual(self.samples.iqr, 50)
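
A minimal sketch of statistics consistent with this test, assuming `iqr` is the distance between nearest-sample quartiles (the `quantile` and `iqr` helpers below are illustrative stand-ins, not the verified implementation):

def quantile(sorted_runtimes, q):
    # Nearest-sample quantile: round the fractional index to the closest
    # sample; Python 3's round() ties half-to-even, which matches the
    # expectations in the quantile tests further down.
    return sorted_runtimes[round((len(sorted_runtimes) - 1) * q)]

def iqr(sorted_runtimes):
    # Interquartile range: Q3 - Q1.
    return quantile(sorted_runtimes, 0.75) - quantile(sorted_runtimes, 0.25)

assert iqr([1000, 1025, 1050, 1075, 1100]) == 50  # the final assertion above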
Example #2
 def test_init_with_samples(self):
     self.samples = PerformanceTestSamples(
         'B2', [Sample(0, 1, 1000), Sample(1, 1, 1100)])
     self.assertEqual(self.samples.count, 2)
     self.assertEqualStats((self.samples.mean, self.samples.sd,
                             self.samples.range, self.samples.spread),
                            (1050.0, 70.71, 100, 9.52 / 100))
Example #3
 def test_quantile(self):
     self.assertEqual(self.samples.quantile(1), 1000)
     self.assertEqual(self.samples.quantile(0), 1000)
     self.samples.add(Sample(2, 1, 1100))
     self.assertEqual(self.samples.quantile(0), 1000)
     self.assertEqual(self.samples.quantile(1), 1100)
     self.samples.add(Sample(3, 1, 1050))
     self.assertEqual(self.samples.quantile(0), 1000)
     self.assertEqual(self.samples.quantile(0.5), 1050)
     self.assertEqual(self.samples.quantile(1), 1100)
Example #4
 def test_computes_five_number_summary(self):
     self.assertEqualFiveNumberSummary(self.samples, (1000, 1000, 1000, 1000, 1000))
     self.samples.add(Sample(2, 1, 1100))
     self.assertEqualFiveNumberSummary(self.samples, (1000, 1000, 1000, 1100, 1100))
     self.samples.add(Sample(3, 1, 1050))
     self.assertEqualFiveNumberSummary(self.samples, (1000, 1000, 1050, 1100, 1100))
     self.samples.add(Sample(4, 1, 1025))
     self.assertEqualFiveNumberSummary(self.samples, (1000, 1000, 1025, 1050, 1100))
     self.samples.add(Sample(5, 1, 1075))
     self.assertEqualFiveNumberSummary(self.samples, (1000, 1025, 1050, 1075, 1100))
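
These expectations pin down the tie-breaking: with three sorted samples, Q1's fractional index 0.5 rounds down to 0 while Q3's 1.5 rounds up to 2, i.e. round-half-to-even. A quick check against the sketched `quantile` helper from Example #1 (still an assumption, not the verified implementation):

runtimes = [1000, 1050, 1100]
# Fractional indices for (min, Q1, median, Q3, max): 0, 0.5, 1.0, 1.5, 2.
# Half-to-even rounding maps them to sample indices 0, 0, 1, 2, 2.
summary = tuple(quantile(runtimes, q) for q in (0, 0.25, 0.5, 0.75, 1))
assert summary == (1000, 1000, 1050, 1100, 1100)  # the three-sample case above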
Example #5
    def test_excludes_outliers_zero_IQR(self):
        self.samples = PerformanceTestSamples('Tight')
        self.samples.add(Sample(0, 2, 23))
        self.samples.add(Sample(1, 2, 18))
        self.samples.add(Sample(2, 2, 18))
        self.samples.add(Sample(3, 2, 18))
        self.assertEqual(self.samples.iqr, 0)

        self.samples.exclude_outliers()

        self.assertEqual(self.samples.count, 3)
        self.assertEqualStats((self.samples.min, self.samples.max), (18, 18))
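
The exclusion behaves like the standard Tukey fences [Q1 - 1.5*IQR, Q3 + 1.5*IQR], which collapse onto the quartiles themselves when the IQR is zero. A hedged sketch of that rule (the function shape and bisect-based trimming are assumptions; `top_only` anticipates the variant in Example #9):

from bisect import bisect_left, bisect_right

def exclude_outliers(sorted_runtimes, q1, q3, top_only=False):
    # Tukey's rule: keep samples inside [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
    # With a zero IQR both fences collapse to the quartiles, so the
    # lone 23 above the upper fence gets dropped.
    iqr = q3 - q1
    lo = 0 if top_only else bisect_left(sorted_runtimes, q1 - 1.5 * iqr)
    hi = bisect_right(sorted_runtimes, q3 + 1.5 * iqr)
    return sorted_runtimes[lo:hi]

assert exclude_outliers([18, 18, 18, 23], q1=18, q3=18) == [18, 18, 18]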
Example #6
 def test_can_handle_zero_runtime(self):
     # guard against dividing by 0
     self.samples = PerformanceTestSamples('Zero')
     self.samples.add(Sample(0, 1, 0))
     self.assertEqualStats(
         (self.samples.mean, self.samples.sd, self.samples.cv,
          self.samples.range, self.samples.spread), (0, 0, 0.0, 0, 0.0))
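
The zero-runtime case documents divide-by-zero guards on the ratio statistics. A plausible shape for those guards, assuming `cv` and `spread` are both taken relative to the mean, as the 9.52/100 ≈ 100/1050 spread expected in Example #2 suggests:

def cv(mean, sd):
    # Coefficient of variation, guarded against a zero mean.
    return sd / mean if mean else 0.0

def spread(mean, lo, hi):
    # Range as a fraction of the mean, with the same guard.
    return (hi - lo) / mean if mean else 0.0

assert cv(0, 0) == 0.0 and spread(0, 0, 0) == 0.0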
Example #7
 def test_computes_range_spread(self):
     ss = self.samples
     self.assertEqualStats(
         (ss.range, ss.spread), (0, 0))
     self.samples.add(Sample(2, 1, 1100))
     self.assertEqualStats(
         (ss.range, ss.spread), (100, 10.0 / 100))
Example #8
 def test_computes_mean_sd_cv(self):
     ss = self.samples
     self.assertEqualStats(
         (ss.mean, ss.sd, ss.cv), (1000.0, 0.0, 0.0))
     self.samples.add(Sample(2, 1, 1100))
     self.assertEqualStats(
         (ss.mean, ss.sd, ss.cv), (1050.0, 70.71, 6.7 / 100))
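
Incremental `add` calls keep mean and standard deviation current, which suggests an online, one-pass accumulator; Welford's algorithm is the textbook choice (a sketch under that assumption, not the confirmed implementation):

class RunningStats:
    """One-pass mean/variance accumulator (Welford's algorithm)."""

    def __init__(self):
        self.n, self.mean, self.S = 0, 0.0, 0.0

    def add(self, x):
        # Update count, running mean, and sum of squared deviations.
        self.n += 1
        delta = x - self.mean
        self.mean += delta / self.n
        self.S += delta * (x - self.mean)

    @property
    def sd(self):
        # Sample standard deviation (n - 1 denominator), matching
        # sd == 70.71 for the samples 1000 and 1100 above.
        return (self.S / (self.n - 1)) ** 0.5 if self.n > 1 else 0.0

rs = RunningStats()
rs.add(1000)
rs.add(1100)
assert (round(rs.mean, 2), round(rs.sd, 2)) == (1050.0, 70.71)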
Example #9
    def test_excludes_outliers_top_only(self):
        ss = [Sample(*map(int, s.split())) for s in
              '0 1 1, 1 1 2, 2 1 2, 3 1 2, 4 1 3'.split(',')]
        self.samples = PerformanceTestSamples('Top', ss)
        self.assertEqualFiveNumberSummary(self.samples, (1, 2, 2, 2, 3))
        self.assertEqual(self.samples.iqr, 0)

        self.samples.exclude_outliers(top_only=True)

        self.assertEqual(self.samples.count, 4)
        self.assertEqualStats((self.samples.min, self.samples.max), (1, 2))
Example #10
def sq(t):  # status quo
    # don't need calibration, but need dummy result for merging samples
    run(t, s=1)
    for suffix in list('abcd'):
        for i in range(0, 20):
            res = run(t, 1, 0, True)
            run_name = res.name + ' i0_' + suffix
            if run_name not in BD.results:
                BD.results[run_name] = res
            else:
                s = res.samples.samples[0]
                s = Sample(i, s.num_iters, s.runtime)
                BD.results[run_name].samples.add(s)
                BD.results[run_name].involuntary_cs += res.involuntary_cs
        BD.results[run_name].samples.exclude_outliers()
        print(BD.results[run_name].samples)
Example #11
    def test_excludes_outliers(self):
        ss = [
            Sample(*map(int, s.split()))
            for s in "0 1 1000, 1 1 1025, 2 1 1050, 3 1 1075, 4 1 1100, "
            "5 1 1000, 6 1 1025, 7 1 1050, 8 1 1075, 9 1 1100, "
            "10 1 1050, 11 1 949, 12 1 1151".split(",")
        ]
        self.samples = PerformanceTestSamples("Outliers", ss)
        self.assertEqual(self.samples.count, 13)
        self.assertEqualStats((self.samples.mean, self.samples.sd), (1050, 52.36))

        self.samples.exclude_outliers()

        self.assertEqual(self.samples.count, 11)
        self.assertEqual(self.samples.outliers, ss[11:])
        self.assertEqualFiveNumberSummary(self.samples, (1000, 1025, 1050, 1075, 1100))
        self.assertEqualStats((self.samples.mean, self.samples.sd), (1050, 35.36))
Example #12
def load_series(s):
    """Load series data from JSON dictionary.

    Returns `PerformanceTestResult` with `PerformanceTestSamples`.
    """
    num_iters = s['num_iters']
    ss = PerformanceTestSamples(
        s['name'], [Sample(i, num_iters, runtime)
                    for (i, runtime) in enumerate(s['data'])])
    r = PerformanceTestResult([0, s['name'], ss.count, ss.min, ss.max,
                               ss.mean, ss.sd, ss.median])
    r.samples = ss

    def _set(key):
        if key in s:
            setattr(r, key, s[key])

    for key in ['max_rss', 'involuntary_cs', 'voluntary_cs']:
        _set(key)
    return r
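
For reference, the dictionary shape `load_series` consumes, per the keys accessed above (the values here are made up; `max_rss` and the context-switch counts are optional pass-throughs):

series = {
    'name': 'B1',
    'num_iters': 1,
    'data': [1000, 1100, 1050],  # per-sample runtimes
    'max_rss': 10510336,         # optional; copied onto the result if present
}
result = load_series(series)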
Example #13
def all_stats(results, setup=0):
    name = results[0].name.split()[0] + (' all' if not setup else ' fixedAll')
    ics, vcs = 0, 0
    pts = PerformanceTestSamples(name)
    for result in results:
        correction = ir(setup / num_iters(result))
        samples = (result.samples.samples if not setup else
                   [Sample(s.i, s.num_iters, s.runtime - correction)
                    for s in result.samples.samples])
        for s in samples:
            pts.add(s)
        ics += result.involuntary_cs
        vcs += result.voluntary_cs
    _all = PerformanceTestResult(
        [0, name, pts.num_samples, pts.min, pts.max,
         int(pts.mean), int(pts.sd), pts.median])
    _all.samples = pts
    _all.involuntary_cs = ics
    _all.voluntary_cs = vcs

    return _all
Example #14
 def setUp(self):
     self.samples = PerformanceTestSamples("B1")
     self.samples.add(Sample(7, 42, 1000))
Example #15
 def test_is_iterable(self):
     s = Sample(1, 2, 3)
     self.assertEqual(s[0], 1)
     self.assertEqual(s[1], 2)
     self.assertEqual(s[2], 3)
Example #16
 def test_has_named_fields(self):
     s = Sample(1, 2, 3)
     self.assertEqual(s.i, 1)
     self.assertEqual(s.num_iters, 2)
     self.assertEqual(s.runtime, 3)
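
Positional indexing plus named fields is exactly what a namedtuple provides, so `Sample` could be as simple as the following (an assumption consistent with these two tests, not the confirmed definition):

from collections import namedtuple

# i: the sample's ordinal number, num_iters: iterations averaged over,
# runtime: the measured time. Field names follow the tests above.
Sample = namedtuple('Sample', 'i num_iters runtime')

s = Sample(1, 2, 3)
assert (s[0], s[1], s[2]) == (1, 2, 3)             # iterable / indexable
assert (s.i, s.num_iters, s.runtime) == (1, 2, 3)  # named fields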