Example 1 (test_get_date): a Run created with a 'date' metadata entry returns it from _get_date(); without it, _get_date() returns None.
    def test_get_date(self):
        date = datetime.datetime.now().isoformat(' ')
        run = pyperf.Run([1.0], metadata={'date': date},
                         collect_metadata=False)
        self.assertEqual(run._get_date(), date)

        run = pyperf.Run([1.0], collect_metadata=False)
        self.assertIsNone(run._get_date())
Example 2 (test_number_types): Run accepts every numeric type in NUMBER_TYPES, both for values and for warmup values.
    def test_number_types(self):
        # ensure that all types of numbers are accepted
        for number_type in NUMBER_TYPES:
            run = pyperf.Run([number_type(1)], collect_metadata=False)
            self.assertIsInstance(run.values[0], number_type)

            run = pyperf.Run([5], warmups=[(4, number_type(3))],
                             collect_metadata=False)
            self.assertEqual(run.warmups, ((4, 3),))
            self.assertIsInstance(run.warmups[0][1], number_type)
Example 3 (test_constructor): Run needs at least one value or warmup value, validates the 'loops' and 'inner_loops' metadata, and rejects empty-string metadata values.
    def test_constructor(self):
        # need at least one value or one warmup value
        with self.assertRaises(ValueError):
            pyperf.Run([], collect_metadata=False)
        pyperf.Run([1.0], collect_metadata=False)
        pyperf.Run([], warmups=[(4, 1.0)], collect_metadata=False)

        # number of loops
        with self.assertRaises(ValueError):
            pyperf.Run([1.0], metadata={'loops': -1}, collect_metadata=False)
        with self.assertRaises(ValueError):
            pyperf.Run([1.0], metadata={'inner_loops': 0}, collect_metadata=False)

        # loops type error
        with self.assertRaises(ValueError):
            pyperf.Run([1.0], metadata={'loops': 1.0}, collect_metadata=False)
        with self.assertRaises(ValueError):
            pyperf.Run([1.0], metadata={'inner_loops': 1.0}, collect_metadata=False)

        # metadata value must not be an empty string
        with self.assertRaises(ValueError):
            pyperf.Run([1.0], metadata={'name': ''}, collect_metadata=False)
        run = pyperf.Run([1.0], metadata={'load_avg_1min': 0.0},
                         collect_metadata=False)
        self.assertEqual(run.get_metadata()['load_avg_1min'], 0.0)
Example 4 (test_extract_metadata): Benchmark._extract_metadata() turns the per-run 'mem_usage' metadata into the benchmark values and clears the warmups.
    def test_extract_metadata(self):
        warmups = ((1, 5.0),)
        runs = [pyperf.Run((1.0,), warmups=warmups,
                           metadata={'name': 'bench', 'mem_usage': 5},
                           collect_metadata=False),
                pyperf.Run((2.0,), warmups=warmups,
                           metadata={'name': 'bench', 'mem_usage': 13},
                           collect_metadata=False)]
        bench = pyperf.Benchmark(runs)

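        # extract the per-run 'mem_usage' metadata: it becomes the benchmark values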
        bench._extract_metadata('mem_usage')
        self.assertEqual(bench.get_values(), (5, 13))
        for run in bench.get_runs():
            self.assertEqual(run.warmups, ())
Example 5 (test_add_runs): BenchmarkSuite.add_runs() merges the runs of a benchmark with the same name into the suite.
    def test_add_runs(self):
        # bench 1
        values = (1.0, 2.0, 3.0)
        run = pyperf.Run(values, metadata={'name': "bench"})
        bench = pyperf.Benchmark([run])
        suite = pyperf.BenchmarkSuite([bench])

        # bench 2
        values2 = (4.0, 5.0, 6.0)
        run = pyperf.Run(values2, metadata={'name': "bench"})
        bench2 = pyperf.Benchmark([run])
        suite.add_runs(bench2)

        bench = suite.get_benchmark('bench')
        self.assertEqual(bench.get_values(), values + values2)
Example 6 (cmd_collect_metadata): collect and print the metadata of the current process, optionally dumping it to a JSON file, after pinning the CPU affinity.
def cmd_collect_metadata(args):
    filename = args.output
    if filename and os.path.exists(filename):
        print("ERROR: The JSON file %r already exists" % filename)
        sys.exit(1)

    cpus = args.affinity
    if cpus:
        if not set_cpu_affinity(cpus):
            print("ERROR: failed to set the CPU affinity")
            sys.exit(1)
    else:
        cpus = get_isolated_cpus()
        if cpus:
            set_cpu_affinity(cpus)
            # ignore if set_cpu_affinity() failed

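    # create a run only to collect the metadata of the current process; the single value is a placeholder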
    run = pyperf.Run([1.0])
    metadata = run.get_metadata()
    if metadata:
        print("Metadata:")
        for line in format_metadata(metadata):
            print(line)

    if filename:
        run = run._update_metadata({'name': 'metadata'})
        bench = pyperf.Benchmark([run])
        bench.dump(filename)
Example 7 (create_run): a worker helper that times compute(), stores the duration in the metadata and builds the resulting Run.
    def create_run(self):
        start_time = monotonic_clock()
        self.compute()
        self.metadata['duration'] = monotonic_clock() - start_time

        return pyperf.Run(self.values,
                          warmups=self.warmups,
                          metadata=self.metadata,
                          collect_metadata=False)
Example 8 (test_attr): Run accessors such as get_loops(), get_inner_loops(), get_total_loops(), values and the raw values.
    def test_attr(self):
        run = pyperf.Run((2.0, 3.0),
                         warmups=((4, 0.5),),
                         metadata={'loops': 2, 'inner_loops': 5},
                         collect_metadata=False)
        self.assertEqual(run.get_loops(), 2)
        self.assertEqual(run.get_inner_loops(), 5)
        self.assertEqual(run.get_total_loops(), 2 * 5)
        self.assertEqual(run.values,
                         (2.0, 3.0))
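        # raw values = value * loops * inner_loops; warmup raw values use the warmup loop count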
        self.assertEqual(run._get_raw_values(),
                         [20.0, 30.0])
        self.assertEqual(run._get_raw_values(warmups=True),
                         [10.0, 20.0, 30.0])

        run = pyperf.Run((2.0, 3.0), warmups=((1, 1.0),))
        self.assertEqual(run.get_loops(), 1)
        self.assertEqual(run.get_inner_loops(), 1)
        self.assertEqual(run.get_total_loops(), 1)
Example 9 (test_format_result): cli.format_result_value() and cli.format_result() render the mean and standard deviation of a benchmark.
    def test_format_result(self):
        run = pyperf.Run([1.0, 1.5, 2.0],
                         warmups=[(1, 3.0)],
                         metadata={'name': 'mybench'},
                         collect_metadata=False)
        bench = pyperf.Benchmark([run])
        self.assertEqual(cli.format_result_value(bench),
                         '1.50 sec +- 0.50 sec')
        self.assertEqual(cli.format_result(bench),
                         'Mean +- std dev: 1.50 sec +- 0.50 sec')
Example 10 (create_run): a test helper that builds a Run with default values and a default 'name' metadata entry.
def create_run(values=None, warmups=None, metadata=None):
    if values is None:
        values = (1.0,)
    if metadata is None:
        metadata = {'name': 'bench'}
    elif 'name' not in metadata:
        metadata['name'] = 'bench'
    return pyperf.Run(values, warmups,
                      metadata=metadata,
                      collect_metadata=False)
Example 11 (test_compare_to): subprocess.Popen is mocked to check the worker command lines spawned for both interpreters when --compare-to is used.
    def test_compare_to(self):
        def time_func(loops):
            return 1.0

        def abs_executable(python):
            return python

        run = pyperf.Run([1.5],
                         metadata={'name': 'name'},
                         collect_metadata=False)
        bench = pyperf.Benchmark([run])
        suite = pyperf.BenchmarkSuite([bench])

        with ExitStack() as cm:

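            # fake Popen: no worker process is actually spawned, wait() reports success (0)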
            def popen(*args, **kw):
                mock_popen = mock.Mock()
                mock_popen.wait.return_value = 0
                return mock_popen

            mock_subprocess = cm.enter_context(
                mock.patch('pyperf._master.subprocess'))
            mock_subprocess.Popen.side_effect = popen

            cm.enter_context(
                mock.patch('pyperf._runner.abs_executable',
                           side_effect=abs_executable))
            cm.enter_context(
                mock.patch('pyperf._master._load_suite_from_pipe',
                           return_value=suite))

            args = [
                "--python=python3.8", "--compare-to=python3.6", "--min-time=5",
                "-p1", "-w3", "-n7", "-l11"
            ]
            runner = self.create_runner(args)
            with tests.capture_stdout():
                runner.bench_time_func('name', time_func)

            def popen_call(python):
                args = [
                    python, mock.ANY, '--worker', '--pipe', mock.ANY,
                    '--worker-task=0', '--values', '7', '--min-time', '5.0',
                    '--loops', '11', '--warmups', '3'
                ]
                kw = {}
                if MS_WINDOWS:
                    kw['close_fds'] = False
                else:
                    kw['pass_fds'] = mock.ANY
                return mock.call(args, env=mock.ANY, **kw)

            call1 = popen_call('python3.6')
            call2 = popen_call('python3.8')
            mock_subprocess.Popen.assert_has_calls([call1, call2])
Example 12 (test_remove_all_metadata): Benchmark._remove_all_metadata() keeps only the 'name' and 'unit' metadata.
    def test_remove_all_metadata(self):
        run = pyperf.Run((1.0,),
                         metadata={'name': 'bench', 'os': 'win', 'unit': 'byte'},
                         collect_metadata=False)
        bench = pyperf.Benchmark([run])
        self.assertEqual(bench.get_metadata(),
                         {'name': 'bench', 'os': 'win', 'unit': 'byte'})

        bench._remove_all_metadata()
        self.assertEqual(bench.get_metadata(),
                         {'name': 'bench', 'unit': 'byte'})
Example 13 (create_bench): a test helper that builds a Benchmark with one single-value Run per value.
    def create_bench(self, values, metadata=None):
        if metadata is None:
            metadata = {'name': 'bench'}
        elif 'name' not in metadata:
            metadata['name'] = 'bench'
        runs = []
        for value in values:
            run = pyperf.Run([value],
                             metadata=metadata,
                             collect_metadata=False)
            runs.append(run)
        return pyperf.Benchmark(runs)
Example 14 (test_get_metadata): BenchmarkSuite.get_metadata() returns only the metadata shared by all benchmarks.
    def test_get_metadata(self):
        benchmarks = []
        for name in ('a', 'b'):
            run = pyperf.Run([1.0],
                             metadata={'name': name, 'os': 'linux'},
                             collect_metadata=False)
            bench = pyperf.Benchmark([run])
            benchmarks.append(bench)

        suite = pyperf.BenchmarkSuite(benchmarks)
        self.assertEqual(suite.get_metadata(),
                         {'os': 'linux'})
Example 15 (test_format_result_calibration): a calibration run (warmups only, no values) is formatted as '<calibration: 100 loops>' and has no median.
    def test_format_result_calibration(self):
        run = pyperf.Run([],
                         warmups=[(100, 1.0)],
                         metadata={'name': 'bench', 'loops': 100},
                         collect_metadata=False)
        bench = pyperf.Benchmark([run])
        self.assertEqual(cli.format_result_value(bench),
                         '<calibration: 100 loops>')
        self.assertEqual(cli.format_result(bench), 'Calibration: 100 loops')
        self.assertRaises(ValueError, bench.median)
Example 16 (test_update_metadata): Benchmark.update_metadata() merges new entries into the existing metadata.
    def test_update_metadata(self):
        runs = []
        for value in (1.0, 2.0, 3.0):
            runs.append(pyperf.Run((value,),
                                   metadata={'name': 'bench'},
                                   collect_metadata=False))
        bench = pyperf.Benchmark(runs)
        self.assertEqual(bench.get_metadata(),
                         {'name': 'bench'})

        bench.update_metadata({'os': 'linux'})
        self.assertEqual(bench.get_metadata(),
                         {'os': 'linux', 'name': 'bench'})
Example 17 (test_remove_warmups): the 'convert --remove-warmups' command strips the warmup values from a dumped benchmark.
    def test_remove_warmups(self):
        values = [1.0, 2.0, 3.0]
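        # with warmups included, the raw values start with the warmup timing (5.0)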
        raw_values = [5.0] + values
        run = pyperf.Run(values,
                         warmups=[(1, 5.0)],
                         metadata={'name': 'bench'})
        bench = pyperf.Benchmark([run])

        self.assertEqual(bench._get_nwarmup(), 1)
        self.assertEqual(bench._get_raw_values(warmups=True), raw_values)

        with tests.temporary_directory() as tmpdir:
            filename = os.path.join(tmpdir, 'test.json')
            bench.dump(filename)

            stdout = self.run_command('convert', filename, '--remove-warmups',
                                      '--stdout')
            bench2 = pyperf.Benchmark.loads(stdout)

        self.assertEqual(bench2._get_nwarmup(), 0)
        self.assertEqual(bench2._get_raw_values(warmups=True), raw_values[1:])
Example 18 (test_benchmark): a Benchmark built from three runs; checks values, unit, raw values, number of runs, per-run loops, metadata and repr().
    def test_benchmark(self):
        values = (1.0, 1.5, 2.0)
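        # raw value = value * inner_loops (3) * loops (20)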
        raw_values = tuple(value * 3 * 20 for value in values)
        runs = []
        for value in values:
            run = pyperf.Run([value],
                             warmups=[(1, 3.0)],
                             metadata={'key': 'value',
                                       'loops': 20,
                                       'inner_loops': 3,
                                       'name': 'mybench'},
                             collect_metadata=False)
            runs.append(run)
        bench = pyperf.Benchmark(runs)

        self.assertEqual(bench.get_values(), values)
        self.assertEqual(bench.get_unit(), 'second')
        self.assertEqual(bench._get_raw_values(), list(raw_values))
        self.assertEqual(bench.get_nrun(), 3)

        runs = bench.get_runs()
        self.assertIsInstance(runs, list)
        self.assertEqual(len(runs), 3)
        for run in runs:
            self.assertIsInstance(run, pyperf.Run)
            self.assertEqual(len(run._get_raw_values(True)), 2)
            self.assertEqual(run.get_loops(), 20)
            self.assertEqual(run.get_inner_loops(), 3)

        self.check_runs(bench, [(1, 3.0)], values)

        self.assertEqual(bench.get_name(), "mybench")
        self.assertEqual(bench.get_metadata(),
                         {'key': 'value',
                          'name': 'mybench',
                          'loops': 20,
                          'inner_loops': 3})
        self.assertEqual(repr(bench),
                         "<Benchmark 'mybench' with 3 runs>")
Example 19 (test_get_unit): Benchmark.get_unit() returns the 'unit' metadata ('byte').
    def test_get_unit(self):
        run = pyperf.Run((1.0,),
                         metadata={'name': 'bench', 'unit': 'byte'},
                         collect_metadata=False)
        bench = pyperf.Benchmark([run])
        self.assertEqual(bench.get_unit(), 'byte')
Example 20 (benchmark): a test helper that wraps three values into a named Benchmark.
    def benchmark(self, name):
        run = pyperf.Run([1.0, 1.5, 2.0],
                         metadata={'name': name},
                         collect_metadata=False)
        return pyperf.Benchmark([run])
Example 21 (test_name): a whitespace-only 'name' metadata value is rejected with ValueError.
    def test_name(self):
        # the name must not be empty (or whitespace only)
        with self.assertRaises(ValueError):
            pyperf.Run([1.0], metadata={'name': '   '})
Example 22 (test_name): a Benchmark built from a Run without 'name' metadata raises ValueError.
    def test_name(self):
        # no name metadata
        run = pyperf.Run([1.0])
        with self.assertRaises(ValueError):
            pyperf.Benchmark([run])