Example #1
    def test_add_runs(self):
        values1 = (1.0, 2.0, 3.0)
        bench = pyperf.Benchmark([create_run(values1)])

        values2 = (4.0, 5.0, 6.0)
        bench2 = pyperf.Benchmark([create_run(values2)])

        bench.add_runs(bench2)
        self.assertEqual(bench.get_values(), values1 + values2)
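
Most of these excerpts rely on a create_run() helper that is not reproduced here. The sketch below shows a plausible implementation, assuming it is just a thin wrapper around pyperf.Run that fills in a default value list and a default 'name' and disables automatic metadata collection; the actual helper in pyperf's test suite may differ.

import pyperf

def create_run(values=None, warmups=None, metadata=None):
    # Hypothetical helper: build a deterministic Run with sensible defaults.
    if values is None:
        values = (1.0,)
    if metadata is None:
        metadata = {'name': 'bench'}
    elif 'name' not in metadata:
        metadata['name'] = 'bench'
    return pyperf.Run(values, warmups=warmups,
                      metadata=metadata,
                      collect_metadata=False)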
Example #2
    def test_get_total_duration(self):
        run = create_run([1.0])
        bench = pyperf.Benchmark([run])
        suite = pyperf.BenchmarkSuite([bench])

        run = create_run([2.0])
        bench = pyperf.Benchmark([run])
        suite.add_runs(bench)

        self.assertEqual(suite.get_total_duration(), 3.0)
Example #3
    def test_add_runs(self):
        # bench 1
        values = (1.0, 2.0, 3.0)
        run = pyperf.Run(values, metadata={'name': "bench"})
        bench = pyperf.Benchmark([run])
        suite = pyperf.BenchmarkSuite([bench])

        # bench 2
        values2 = (4.0, 5.0, 6.0)
        run = pyperf.Run(values2, metadata={'name': "bench"})
        bench2 = pyperf.Benchmark([run])
        suite.add_runs(bench2)

        bench = suite.get_benchmark('bench')
        self.assertEqual(bench.get_values(), values + values2)
Example #4
    def test_get_dates(self):
        bench = pyperf.Benchmark([create_run()])
        self.assertIsNone(bench.get_dates())

        metadata = {'date': '2016-07-20T14:06:00', 'duration': 60.0}
        bench = pyperf.Benchmark([create_run(metadata=metadata)])
        self.assertEqual(bench.get_dates(),
                         (datetime.datetime(2016, 7, 20, 14, 6, 0),
                          datetime.datetime(2016, 7, 20, 14, 7, 0)))

        metadata = {'date': '2016-07-20T14:10:00', 'duration': 60.0}
        bench.add_run(create_run(metadata=metadata))
        self.assertEqual(bench.get_dates(),
                         (datetime.datetime(2016, 7, 20, 14, 6, 0),
                          datetime.datetime(2016, 7, 20, 14, 11, 0)))
Example #5
    def test_stats_empty(self):
        run = create_run(values=[], warmups=[(4, 10.0)])
        bench = pyperf.Benchmark([run])
        self.assertRaises(Exception, bench.mean)
        self.assertRaises(Exception, bench.median)
        self.assertRaises(Exception, bench.stdev)
        self.assertRaises(Exception, bench.median_abs_dev)
Example #6
    def test_get_warmups(self):
        # exact
        runs = [create_run((1.0, 2.0, 3.0), warmups=[(1, 1.0)]),
                create_run((5.0, 6.0), warmups=[(1, 4.0)])]
        bench = pyperf.Benchmark(runs)
        nwarmup = bench._get_nwarmup()
        self.assertEqual(nwarmup, 1)
        self.assertIsInstance(nwarmup, int)

        # average
        runs = [create_run([3.0], warmups=[(1, 1.0), (1, 2.0)]),
                create_run([4.0, 5.0, 6.0])]
        bench = pyperf.Benchmark(runs)
        nwarmup = bench._get_nwarmup()
        self.assertEqual(nwarmup, 1)
        self.assertIsInstance(nwarmup, float)
Example #7
    def test__get_nvalue_per_run(self):
        # exact
        runs = [create_run([1.0, 2.0, 3.0]),
                create_run([4.0, 5.0, 6.0])]
        bench = pyperf.Benchmark(runs)
        nvalue = bench._get_nvalue_per_run()
        self.assertEqual(nvalue, 3)
        self.assertIsInstance(nvalue, int)

        # average
        runs = [create_run([1.0, 2.0, 3.0, 4.0]),
                create_run([5.0, 6.0])]
        bench = pyperf.Benchmark(runs)
        nvalue = bench._get_nvalue_per_run()
        self.assertEqual(nvalue, 3.0)
        self.assertIsInstance(nvalue, float)
Example #8
def cmd_collect_metadata(args):
    filename = args.output
    if filename and os.path.exists(filename):
        print("ERROR: The JSON file %r already exists" % filename)
        sys.exit(1)

    cpus = args.affinity
    if cpus:
        if not set_cpu_affinity(cpus):
            print("ERROR: failed to set the CPU affinity")
            sys.exit(1)
    else:
        cpus = get_isolated_cpus()
        if cpus:
            set_cpu_affinity(cpus)
            # ignore if set_cpu_affinity() failed

    run = pyperf.Run([1.0])
    metadata = run.get_metadata()
    if metadata:
        print("Metadata:")
        for line in format_metadata(metadata):
            print(line)

    if filename:
        run = run._update_metadata({'name': 'metadata'})
        bench = pyperf.Benchmark([run])
        bench.dump(filename)
Example #9
    def _worker(self, task):
        self._cpu_affinity()
        self._process_priority()
        run = task.create_run()
        bench = pyperf.Benchmark((run,))
        self._display_result(bench, checks=False)
        return bench
Example #10
    def test_stats(self):
        values = [float(value) for value in range(1, 96)]
        run = create_run(values)
        bench = pyperf.Benchmark([run])
        self.assertEqual(bench.mean(), 48.0)
        self.assertEqual(bench.median(), 48.0)
        self.assertAlmostEqual(bench.stdev(), 27.5680, delta=1e-3)
        self.assertEqual(bench.median_abs_dev(), 24.0)
Example #11
    def test_stats_single(self):
        values = [7.0]
        run = create_run(values)
        bench = pyperf.Benchmark([run])
        self.assertEqual(bench.mean(), 7.0)
        self.assertEqual(bench.median(), 7.0)
        self.assertRaises(Exception, bench.stdev)
        self.assertEqual(bench.median_abs_dev(), 0.0)
Example #12
    def test_stats_same(self):
        values = [5.0 for i in range(10)]
        run = create_run(values)
        bench = pyperf.Benchmark([run])
        self.assertEqual(bench.mean(), 5.0)
        self.assertEqual(bench.median(), 5.0)
        self.assertEqual(bench.stdev(), 0.0)
        self.assertEqual(bench.median_abs_dev(), 0.0)
Example #13
    def test_get_dates(self):
        run = create_run(metadata={'date': '2016-07-20T14:06:00',
                                   'duration': 60.0,
                                   'name': 'bench1'})
        bench = pyperf.Benchmark([run])
        suite = pyperf.BenchmarkSuite([bench])
        self.assertEqual(suite.get_dates(),
                         (datetime.datetime(2016, 7, 20, 14, 6, 0),
                          datetime.datetime(2016, 7, 20, 14, 7, 0)))

        run = create_run(metadata={'date': '2016-07-20T14:10:00',
                                   'duration': 60.0,
                                   'name': 'bench2'})
        bench = pyperf.Benchmark([run])
        suite.add_benchmark(bench)
        self.assertEqual(suite.get_dates(),
                         (datetime.datetime(2016, 7, 20, 14, 6, 0),
                          datetime.datetime(2016, 7, 20, 14, 11, 0)))
Example #14
    def test_format_result(self):
        run = pyperf.Run([1.0, 1.5, 2.0],
                         warmups=[(1, 3.0)],
                         metadata={'name': 'mybench'},
                         collect_metadata=False)
        bench = pyperf.Benchmark([run])
        self.assertEqual(cli.format_result_value(bench),
                         '1.50 sec +- 0.50 sec')
        self.assertEqual(cli.format_result(bench),
                         'Mean +- std dev: 1.50 sec +- 0.50 sec')
Example #15
    def test_get_total_duration(self):
        # use duration metadata
        runs = [create_run([0.1], metadata={'duration': 1.0}),
                create_run([0.1], metadata={'duration': 2.0})]
        bench = pyperf.Benchmark(runs)
        self.assertEqual(bench.get_total_duration(), 3.0)

        # run without duration metadata
        bench.add_run(create_run([5.0], metadata={}))
        self.assertEqual(bench.get_total_duration(), 8.0)
Example #16
    def test_compare_to(self):
        def time_func(loops):
            return 1.0

        def abs_executable(python):
            return python

        run = pyperf.Run([1.5],
                         metadata={'name': 'name'},
                         collect_metadata=False)
        bench = pyperf.Benchmark([run])
        suite = pyperf.BenchmarkSuite([bench])

        with ExitStack() as cm:

            def popen(*args, **kw):
                mock_popen = mock.Mock()
                mock_popen.wait.return_value = 0
                return mock_popen

            mock_subprocess = cm.enter_context(
                mock.patch('pyperf._master.subprocess'))
            mock_subprocess.Popen.side_effect = popen

            cm.enter_context(
                mock.patch('pyperf._runner.abs_executable',
                           side_effect=abs_executable))
            cm.enter_context(
                mock.patch('pyperf._master._load_suite_from_pipe',
                           return_value=suite))

            args = [
                "--python=python3.8", "--compare-to=python3.6", "--min-time=5",
                "-p1", "-w3", "-n7", "-l11"
            ]
            runner = self.create_runner(args)
            with tests.capture_stdout():
                runner.bench_time_func('name', time_func)

            def popen_call(python):
                args = [
                    python, mock.ANY, '--worker', '--pipe', mock.ANY,
                    '--worker-task=0', '--values', '7', '--min-time', '5.0',
                    '--loops', '11', '--warmups', '3'
                ]
                kw = {}
                if MS_WINDOWS:
                    kw['close_fds'] = False
                else:
                    kw['pass_fds'] = mock.ANY
                return mock.call(args, env=mock.ANY, **kw)

            call1 = popen_call('python3.6')
            call2 = popen_call('python3.8')
            mock_subprocess.Popen.assert_has_calls([call1, call2])
Example #17
    def test_remove_all_metadata(self):
        run = pyperf.Run((1.0,),
                         metadata={'name': 'bench', 'os': 'win', 'unit': 'byte'},
                         collect_metadata=False)
        bench = pyperf.Benchmark([run])
        self.assertEqual(bench.get_metadata(),
                         {'name': 'bench', 'os': 'win', 'unit': 'byte'})

        bench._remove_all_metadata()
        self.assertEqual(bench.get_metadata(),
                         {'name': 'bench', 'unit': 'byte'})
Example #18
    def create_bench(self, values, metadata=None):
        if metadata is None:
            metadata = {'name': 'bench'}
        elif 'name' not in metadata:
            metadata['name'] = 'bench'
        runs = []
        for value in values:
            run = pyperf.Run([value],
                             metadata=metadata,
                             collect_metadata=False)
            runs.append(run)
        return pyperf.Benchmark(runs)
Example #19
    def test_get_metadata(self):
        benchmarks = []
        for name in ('a', 'b'):
            run = pyperf.Run([1.0],
                             metadata={'name': name, 'os': 'linux'},
                             collect_metadata=False)
            bench = pyperf.Benchmark([run])
            benchmarks.append(bench)

        suite = pyperf.BenchmarkSuite(benchmarks)
        self.assertEqual(suite.get_metadata(),
                         {'os': 'linux'})
Example #20
    def test_format_result_calibration(self):
        run = pyperf.Run([],
                         warmups=[(100, 1.0)],
                         metadata={'name': 'bench', 'loops': 100},
                         collect_metadata=False)
        bench = pyperf.Benchmark([run])
        self.assertEqual(cli.format_result_value(bench),
                         '<calibration: 100 loops>')
        self.assertEqual(cli.format_result(bench), 'Calibration: 100 loops')
        self.assertRaises(ValueError, bench.median)
Example #21
    def test_update_metadata(self):
        runs = []
        for value in (1.0, 2.0, 3.0):
            runs.append(pyperf.Run((value,),
                                   metadata={'name': 'bench'},
                                   collect_metadata=False))
        bench = pyperf.Benchmark(runs)
        self.assertEqual(bench.get_metadata(),
                         {'name': 'bench'})

        bench.update_metadata({'os': 'linux'})
        self.assertEqual(bench.get_metadata(),
                         {'os': 'linux', 'name': 'bench'})
Example #22
    def test_extract_metadata(self):
        warmups = ((1, 5.0),)
        runs = [pyperf.Run((1.0,), warmups=warmups,
                           metadata={'name': 'bench', 'mem_usage': 5},
                           collect_metadata=False),
                pyperf.Run((2.0,), warmups=warmups,
                           metadata={'name': 'bench', 'mem_usage': 13},
                           collect_metadata=False)]
        bench = pyperf.Benchmark(runs)

        bench._extract_metadata('mem_usage')
        self.assertEqual(bench.get_values(), (5, 13))
        for run in bench.get_runs():
            self.assertEqual(run.warmups, ())
Example #23
    def test_remove_warmups(self):
        values = [1.0, 2.0, 3.0]
        raw_values = [5.0] + values
        run = pyperf.Run(values,
                         warmups=[(1, 5.0)],
                         metadata={'name': 'bench'})
        bench = pyperf.Benchmark([run])

        self.assertEqual(bench._get_nwarmup(), 1)
        self.assertEqual(bench._get_raw_values(warmups=True), raw_values)

        with tests.temporary_directory() as tmpdir:
            filename = os.path.join(tmpdir, 'test.json')
            bench.dump(filename)

            stdout = self.run_command('convert', filename, '--remove-warmups',
                                      '--stdout')
            bench2 = pyperf.Benchmark.loads(stdout)

        self.assertEqual(bench2._get_nwarmup(), 0)
        self.assertEqual(bench2._get_raw_values(warmups=True), raw_values[1:])
Example #24
    def test_benchmark(self):
        values = (1.0, 1.5, 2.0)
        raw_values = tuple(value * 3 * 20 for value in values)
        runs = []
        for value in values:
            run = pyperf.Run([value],
                             warmups=[(1, 3.0)],
                             metadata={'key': 'value',
                                       'loops': 20,
                                       'inner_loops': 3,
                                       'name': 'mybench'},
                             collect_metadata=False)
            runs.append(run)
        bench = pyperf.Benchmark(runs)

        self.assertEqual(bench.get_values(), values)
        self.assertEqual(bench.get_unit(), 'second')
        self.assertEqual(bench._get_raw_values(), list(raw_values))
        self.assertEqual(bench.get_nrun(), 3)

        runs = bench.get_runs()
        self.assertIsInstance(runs, list)
        self.assertEqual(len(runs), 3)
        for run in runs:
            self.assertIsInstance(run, pyperf.Run)
            self.assertEqual(len(run._get_raw_values(True)), 2)
            self.assertEqual(run.get_loops(), 20)
            self.assertEqual(run.get_inner_loops(), 3)

        self.check_runs(bench, [(1, 3.0)], values)

        self.assertEqual(bench.get_name(), "mybench")
        self.assertEqual(bench.get_metadata(),
                         {'key': 'value',
                          'name': 'mybench',
                          'loops': 20,
                          'inner_loops': 3})
        self.assertEqual(repr(bench),
                         "<Benchmark 'mybench' with 3 runs>")
Example #25
    def test_add_run(self):
        metadata = {'name': 'bench', 'hostname': 'toto'}
        runs = [create_run(metadata=metadata)]
        bench = pyperf.Benchmark(runs)

        # expect Run, not list
        self.assertRaises(TypeError, bench.add_run, [1.0])

        bench.add_run(create_run(metadata=metadata))

        # incompatible: name is different
        metadata = {'name': 'bench2', 'hostname': 'toto'}
        with self.assertRaises(ValueError):
            bench.add_run(create_run(metadata=metadata))

        # incompatible: hostname is different
        metadata = {'name': 'bench', 'hostname': 'homer'}
        with self.assertRaises(ValueError):
            bench.add_run(create_run(metadata=metadata))

        # compatible (same metadata)
        metadata = {'name': 'bench', 'hostname': 'toto'}
        bench.add_run(create_run(metadata=metadata))
Example #26
    def test_get_nvalue(self):
        bench = pyperf.Benchmark([create_run([2.0, 3.0])])
        self.assertEqual(bench.get_nvalue(), 2)

        bench.add_run(create_run([5.0]))
        self.assertEqual(bench.get_nvalue(), 3)
Example #27
    def create_dummy_benchmark(self):
        runs = [create_run()]
        return pyperf.Benchmark(runs)
Example #28
    def benchmark(self, name):
        run = pyperf.Run([1.0, 1.5, 2.0],
                         metadata={'name': name},
                         collect_metadata=False)
        return pyperf.Benchmark([run])
Example #29
    def test_update_metadata_inner_loops(self):
        run = create_run(metadata={'inner_loops': 5})
        bench = pyperf.Benchmark([run])
        with self.assertRaises(ValueError):
            bench.update_metadata({'inner_loops': 8})
Example #30
    def test_get_runs(self):
        run1 = create_run([1.0])
        run2 = create_run([2.0])

        bench = pyperf.Benchmark([run1, run2])
        self.assertEqual(bench.get_runs(), [run1, run2])
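
Taken together, the examples above exercise a small surface of the pyperf API: pyperf.Run, pyperf.Benchmark, pyperf.BenchmarkSuite, plus their statistics and serialization helpers. Below is a minimal standalone sketch assembled only from calls that appear above; the values and the file name are illustrative.

import pyperf

# One Run per measured value; collect_metadata=False keeps the runs minimal.
runs = [pyperf.Run([value],
                   metadata={'name': 'mybench'},
                   collect_metadata=False)
        for value in (1.0, 1.5, 2.0)]

bench = pyperf.Benchmark(runs)
print(bench.get_name())             # 'mybench'
print(bench.get_nvalue())           # 3
print(bench.mean(), bench.stdev())  # 1.5 0.5

# Benchmarks can be grouped into a suite, looked up by name, and dumped to JSON.
suite = pyperf.BenchmarkSuite([bench])
print(suite.get_benchmark('mybench').median())  # 1.5
bench.dump('mybench.json')          # illustrative file name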