Code example #1: comparing two benchmarks with the compare_to command; the expected output lists common and per-benchmark metadata.
    def test_compare_to(self):
        runs = self.create_runs((1.0, 1.5, 2.0), {
            'hostname': 'toto',
            'python_version': '2.7'
        })
        ref_result = perf.Benchmark(runs=runs, name='py2')

        runs = self.create_runs((1.5, 2.0, 2.5), {
            'hostname': 'toto',
            'python_version': '3.4'
        })
        changed_result = perf.Benchmark(runs=runs, name='py3')

        stdout = self.compare('compare_to', ref_result, changed_result)

        expected = ('Reference: py2\n'
                    'Changed: py3\n'
                    '\n'
                    'Common metadata:\n'
                    '- hostname: toto\n'
                    '\n'
                    'py2 metadata:\n'
                    '- python_version: 2.7\n'
                    '\n'
                    'py3 metadata:\n'
                    '- python_version: 3.4\n'
                    '\n'
                    'Average: [py2] 1.50 sec +- 0.50 sec '
                    '-> [py3] 2.00 sec +- 0.50 sec: 1.3x slower\n'
                    'Not significant!')
        self.assertEqual(stdout.rstrip(), expected)
Code example #2: merging the runs of another benchmark with Benchmark.add_runs().
    def test_add_runs(self):
        values1 = (1.0, 2.0, 3.0)
        bench = perf.Benchmark([create_run(values1)])

        values2 = (4.0, 5.0, 6.0)
        bench2 = perf.Benchmark([create_run(values2)])

        bench.add_runs(bench2)
        self.assertEqual(bench.get_values(), values1 + values2)
Code example #3: total duration of a benchmark suite via BenchmarkSuite.get_total_duration().
    def test_get_total_duration(self):
        run = create_run([1.0])
        bench = perf.Benchmark([run])
        suite = perf.BenchmarkSuite([bench])

        run = create_run([2.0])
        bench = perf.Benchmark([run])
        suite.add_runs(bench)

        self.assertEqual(suite.get_total_duration(), 3.0)
Code example #4: date range of a benchmark via Benchmark.get_dates().
File: test_bench.py (project: umandalroald/perf)
    def test_get_dates(self):
        bench = perf.Benchmark([create_run()])
        self.assertIsNone(bench.get_dates())

        metadata = {'date': '2016-07-20T14:06:00', 'duration': 60.0}
        bench = perf.Benchmark([create_run(metadata=metadata)])
        self.assertEqual(bench.get_dates(), (datetime.datetime(
            2016, 7, 20, 14, 6, 0), datetime.datetime(2016, 7, 20, 14, 7, 0)))

        metadata = {'date': '2016-07-20T14:10:00', 'duration': 60.0}
        bench.add_run(create_run(metadata=metadata))
        self.assertEqual(bench.get_dates(), (datetime.datetime(
            2016, 7, 20, 14, 6, 0), datetime.datetime(2016, 7, 20, 14, 11, 0)))
Code example #5: number of values per run via Benchmark._get_nvalue_per_run(), exact or averaged.
File: test_bench.py (project: umandalroald/perf)
    def test__get_nvalue_per_run(self):
        # exact
        runs = [create_run([1.0, 2.0, 3.0]), create_run([4.0, 5.0, 6.0])]
        bench = perf.Benchmark(runs)
        nvalue = bench._get_nvalue_per_run()
        self.assertEqual(nvalue, 3)
        self.assertIsInstance(nvalue, int)

        # average
        runs = [create_run([1.0, 2.0, 3.0, 4.0]), create_run([5.0, 6.0])]
        bench = perf.Benchmark(runs)
        nvalue = bench._get_nvalue_per_run()
        self.assertEqual(nvalue, 3.0)
        self.assertIsInstance(nvalue, float)
Code example #6: adding a benchmark's runs to a suite and retrieving the merged benchmark by name.
    def test_add_runs(self):
        # bench 1
        values = (1.0, 2.0, 3.0)
        run = perf.Run(values, metadata={'name': "bench"})
        bench = perf.Benchmark([run])
        suite = perf.BenchmarkSuite([bench])

        # bench 2
        values2 = (4.0, 5.0, 6.0)
        run = perf.Run(values2, metadata={'name': "bench"})
        bench2 = perf.Benchmark([run])
        suite.add_runs(bench2)

        bench = suite.get_benchmark('bench')
        self.assertEqual(bench.get_values(), values + values2)
Code example #7: comparing two benchmarks with the compare command.
    def test_compare(self):
        runs = self.create_runs((1.0, 1.5, 2.0))
        ref_result = perf.Benchmark(runs=runs, name='py2')

        runs = self.create_runs((1.5, 2.0, 2.5))
        changed_result = perf.Benchmark(runs=runs, name='py3')

        stdout = self.compare('compare', ref_result, changed_result)

        expected = ('Reference (best): py2\n'
                    '\n'
                    'Average: [py2] 1.50 sec +- 0.50 sec '
                    '-> [py3] 2.00 sec +- 0.50 sec: 1.3x slower\n'
                    'Not significant!')
        self.assertEqual(stdout.rstrip(), expected)
Code example #8: statistics raise an exception when a benchmark has no values.
    def test_stats_empty(self):
        run = create_run(values=[], warmups=[(4, 10.0)])
        bench = perf.Benchmark([run])
        self.assertRaises(Exception, bench.mean)
        self.assertRaises(Exception, bench.median)
        self.assertRaises(Exception, bench.stdev)
        self.assertRaises(Exception, bench.median_abs_dev)
Code example #9: number of warmups per run via Benchmark._get_nwarmup().
    def test_get_warmups(self):
        # exact
        runs = [create_run((1.0, 2.0, 3.0), warmups=[(1, 1.0)]),
                create_run((5.0, 6.0), warmups=[(1, 4.0)])]
        bench = perf.Benchmark(runs)
        nwarmup = bench._get_nwarmup()
        self.assertEqual(nwarmup, 1)
        self.assertIsInstance(nwarmup, int)

        # average
        runs = [create_run([3.0], warmups=[(1, 1.0), (1, 2.0)]),
                create_run([4.0, 5.0, 6.0])]
        bench = perf.Benchmark(runs)
        nwarmup = bench._get_nwarmup()
        self.assertEqual(nwarmup, 1)
        self.assertIsInstance(nwarmup, float)
Code example #10: worker loop that fills a perf.RunResult and returns it wrapped in a perf.Benchmark.
File: text_runner.py (project: ssbr/perf)
    def _worker(self, sample_func):
        loops = self.args.loops
        if loops < 1:
            # FIXME: move this check in argument parsing
            raise ValueError("--loops must be >= 1")

        run_result = perf.RunResult(loops=loops,
                                    inner_loops=self.inner_loops,
                                    metadata=self.metadata)

        # only import metadata submodule in worker processes
        from perf import metadata as perf_metadata
        perf_metadata.collect_metadata(run_result.metadata)

        for is_warmup, run in self._range():
            dt = sample_func(loops)
            dt = float(dt) / loops
            if self.inner_loops is not None:
                dt /= self.inner_loops
            self._add(run_result, is_warmup, run, dt)

        self._display_run_result_avg(run_result)

        result = perf.Benchmark(name=self.name)
        result.runs.append(run_result)
        return result
Code example #11: the collect_metadata command: set CPU affinity, collect run metadata, print it and optionally dump it to a JSON file.
def cmd_collect_metadata(args):
    filename = args.output
    if filename and os.path.exists(filename):
        print("ERROR: The JSON file %r already exists" % filename)
        sys.exit(1)

    cpus = args.affinity
    if cpus:
        if not set_cpu_affinity(cpus):
            print("ERROR: failed to set the CPU affinity")
            sys.exit(1)
    else:
        cpus = get_isolated_cpus()
        if cpus:
            set_cpu_affinity(cpus)
            # ignore if set_cpu_affinity() failed

    run = perf.Run([1.0])
    metadata = run.get_metadata()
    if metadata:
        print("Metadata:")
        for line in format_metadata(metadata):
            print(line)

    if filename:
        run = run._update_metadata({'name': 'metadata'})
        bench = perf.Benchmark([run])
        bench.dump(filename)
Code example #12: worker that creates a single-run benchmark and displays the result.
    def _worker(self, task):
        self._cpu_affinity()
        self._process_priority()
        run = task.create_run()
        bench = perf.Benchmark((run,))
        self._display_result(bench, checks=False)
        return bench
Code example #13: spawning worker processes and aggregating their runs into one benchmark.
File: text_runner.py (project: ssbr/perf)
    def _spawn_workers(self):
        verbose = self.args.verbose
        stream = self._stream()
        nprocess = self.args.processes
        bench = perf.Benchmark(name=self.name)

        for process in range(nprocess):
            run = self._spawn_worker()
            bench.runs.append(run)
            if verbose > 1:
                text = perf._very_verbose_run(run)
                print("Run %s/%s: %s" % (1 + process, nprocess, text),
                      file=stream)
            else:
                print(".", end='', file=stream)
                stream.flush()

        if verbose <= 1:
            print(file=stream)

        if self.args.metadata:
            perf._display_metadata(bench.get_metadata(), file=stream)

        perf._display_benchmark_avg(bench, verbose=verbose, file=stream)

        stream.flush()
        _json_dump(bench, self.args)
        return bench
Code example #14: mean, median, standard deviation and median absolute deviation of a benchmark.
    def test_stats(self):
        values = [float(value) for value in range(1, 96)]
        run = create_run(values)
        bench = perf.Benchmark([run])
        self.assertEqual(bench.mean(), 48.0)
        self.assertEqual(bench.median(), 48.0)
        self.assertAlmostEqual(bench.stdev(), 27.5680, delta=1e-3)
        self.assertEqual(bench.median_abs_dev(), 24.0)
Code example #15: statistics of a single-value benchmark (stdev() raises).
    def test_stats_single(self):
        values = [7.0]
        run = create_run(values)
        bench = perf.Benchmark([run])
        self.assertEqual(bench.mean(), 7.0)
        self.assertEqual(bench.median(), 7.0)
        self.assertRaises(Exception, bench.stdev)
        self.assertEqual(bench.median_abs_dev(), 0.0)
Code example #16: statistics when all values are identical.
    def test_stats_same(self):
        values = [5.0 for i in range(10)]
        run = create_run(values)
        bench = perf.Benchmark([run])
        self.assertEqual(bench.mean(), 5.0)
        self.assertEqual(bench.median(), 5.0)
        self.assertEqual(bench.stdev(), 0.0)
        self.assertEqual(bench.median_abs_dev(), 0.0)
Code example #17: formatting a calibration-only benchmark result.
    def test_format_result_calibration(self):
        run = perf.Run([], warmups=[(100, 1.0)],
                       metadata={'name': 'bench', 'loops': 100},
                       collect_metadata=False)
        bench = perf.Benchmark([run])
        self.assertEqual(cli.format_result_value(bench), '<calibration: 100 loops>')
        self.assertEqual(cli.format_result(bench), 'Calibration: 100 loops')
        self.assertRaises(ValueError, bench.median)
Code example #18: reading the value unit from run metadata via Benchmark.get_unit().
File: test_bench.py (project: umandalroald/perf)
    def test_get_unit(self):
        run = perf.Run((1.0, ),
                       metadata={
                           'name': 'bench',
                           'unit': 'byte'
                       },
                       collect_metadata=False)
        bench = perf.Benchmark([run])
        self.assertEqual(bench.get_unit(), 'byte')
Code example #19: --compare-to spawns a worker for each Python executable (subprocess mocked).
File: test_runner.py (project: umandalroald/perf)
    def test_compare_to(self):
        def time_func(loops):
            return 1.0

        def abs_executable(python):
            return python

        run = perf.Run([1.5],
                       metadata={'name': 'name'},
                       collect_metadata=False)
        bench = perf.Benchmark([run])
        suite = perf.BenchmarkSuite([bench])

        with ExitStack() as cm:

            def popen(*args, **kw):
                mock_popen = mock.Mock()
                mock_popen.wait.return_value = 0
                return mock_popen

            mock_subprocess = cm.enter_context(
                mock.patch('perf._runner.subprocess'))
            mock_subprocess.Popen.side_effect = popen

            cm.enter_context(
                mock.patch('perf._runner.abs_executable',
                           side_effect=abs_executable))
            cm.enter_context(
                mock.patch('perf._runner._load_suite_from_pipe',
                           return_value=suite))

            runner = perf.Runner()

            args = [
                "--python=python1", "--compare-to=python2", "--min-time=5",
                "-p1", "-w3", "-n7", "-l11"
            ]
            runner.parse_args(args)
            with tests.capture_stdout():
                runner.bench_time_func('name', time_func)

            def popen_call(python):
                args = [
                    python, mock.ANY, '--worker', '--pipe', mock.ANY,
                    '--worker-task=0', '--values', '7', '--warmups', '3',
                    '--loops', '11', '--min-time', '5.0'
                ]
                kw = {}
                if MS_WINDOWS:
                    kw['close_fds'] = False
                elif six.PY3:
                    kw['pass_fds'] = mock.ANY
                return mock.call(args, env=mock.ANY, **kw)

            call1 = popen_call('python2')
            call2 = popen_call('python1')
            mock_subprocess.Popen.assert_has_calls([call1, call2])
Code example #20: date range of a benchmark suite via BenchmarkSuite.get_dates().
    def test_get_dates(self):
        run = create_run(metadata={'date': '2016-07-20T14:06:00',
                                   'duration': 60.0,
                                   'name': 'bench1'})
        bench = perf.Benchmark([run])
        suite = perf.BenchmarkSuite([bench])
        self.assertEqual(suite.get_dates(),
                         (datetime.datetime(2016, 7, 20, 14, 6, 0),
                          datetime.datetime(2016, 7, 20, 14, 7, 0)))

        run = create_run(metadata={'date': '2016-07-20T14:10:00',
                                   'duration': 60.0,
                                   'name': 'bench2'})
        bench = perf.Benchmark([run])
        suite.add_benchmark(bench)
        self.assertEqual(suite.get_dates(),
                         (datetime.datetime(2016, 7, 20, 14, 6, 0),
                          datetime.datetime(2016, 7, 20, 14, 11, 0)))
Code example #21: helper that builds a benchmark with one run per value.
File: test_perf_cli.py (project: yunstanford/perf)
    def create_bench(self, values, metadata=None):
        if metadata is None:
            metadata = {'name': 'bench'}
        elif 'name' not in metadata:
            metadata['name'] = 'bench'
        runs = []
        for value in values:
            run = perf.Run([value], metadata=metadata, collect_metadata=False)
            runs.append(run)
        return perf.Benchmark(runs)
Code example #22: formatting a benchmark result as mean +- standard deviation.
    def test_format_result(self):
        run = perf.Run([1.0, 1.5, 2.0],
                       warmups=[(1, 3.0)],
                       metadata={'name': 'mybench'},
                       collect_metadata=False)
        bench = perf.Benchmark([run])
        self.assertEqual(cli.format_result_value(bench),
                         '1.50 sec +- 0.50 sec')
        self.assertEqual(cli.format_result(bench),
                         'Mean +- std dev: 1.50 sec +- 0.50 sec')
Code example #23: total duration computed from per-run duration metadata, falling back to the run values.
    def test_get_total_duration(self):
        # use duration metadata
        runs = [create_run([0.1], metadata={'duration': 1.0}),
                create_run([0.1], metadata={'duration': 2.0})]
        bench = perf.Benchmark(runs)
        self.assertEqual(bench.get_total_duration(), 3.0)

        # run without duration metadata
        bench.add_run(create_run([5.0], metadata={}))
        self.assertEqual(bench.get_total_duration(), 8.0)
Code example #24: Benchmark._remove_all_metadata() keeps only the name and unit.
    def test_remove_all_metadata(self):
        run = perf.Run((1.0,),
                       metadata={'name': 'bench', 'os': 'win', 'unit': 'byte'},
                       collect_metadata=False)
        bench = perf.Benchmark([run])
        self.assertEqual(bench.get_metadata(),
                         {'name': 'bench', 'os': 'win', 'unit': 'byte'})

        bench._remove_all_metadata()
        self.assertEqual(bench.get_metadata(),
                         {'name': 'bench', 'unit': 'byte'})
Code example #25: suite metadata is the metadata shared by all benchmarks.
    def test_get_metadata(self):
        benchmarks = []
        for name in ('a', 'b'):
            run = perf.Run([1.0],
                           metadata={'name': name, 'os': 'linux'},
                           collect_metadata=False)
            bench = perf.Benchmark([run])
            benchmarks.append(bench)

        suite = perf.BenchmarkSuite(benchmarks)
        self.assertEqual(suite.get_metadata(),
                         {'os': 'linux'})
Code example #26: Benchmark.update_metadata() merges new metadata into the benchmark.
    def test_update_metadata(self):
        runs = []
        for value in (1.0, 2.0, 3.0):
            runs.append(perf.Run((value,),
                                 metadata={'name': 'bench'},
                                 collect_metadata=False))
        bench = perf.Benchmark(runs)
        self.assertEqual(bench.get_metadata(),
                         {'name': 'bench'})

        bench.update_metadata({'os': 'linux'})
        self.assertEqual(bench.get_metadata(),
                         {'os': 'linux', 'name': 'bench'})
Code example #27: Benchmark._extract_metadata() turns a metadata field into the benchmark values and drops warmups.
    def test_extract_metadata(self):
        warmups = ((1, 5.0),)
        runs = [perf.Run((1.0,), warmups=warmups,
                         metadata={'name': 'bench', 'mem_usage': 5},
                         collect_metadata=False),
                perf.Run((2.0,), warmups=warmups,
                         metadata={'name': 'bench', 'mem_usage': 13},
                         collect_metadata=False)]
        bench = perf.Benchmark(runs)

        bench._extract_metadata('mem_usage')
        self.assertEqual(bench.get_values(), (5, 13))
        for run in bench.get_runs():
            self.assertEqual(run.warmups, ())
Code example #28: older API: perf.RunResult runs aggregated into a named perf.Benchmark and formatted.
    def test_results(self):
        runs = []
        for sample in (1.0, 1.5, 2.0):
            run = perf.RunResult([sample])
            run.metadata['key'] = 'value'
            runs.append(run)

        results = perf.Benchmark(runs, "name")
        self.assertEqual(results.runs, runs)
        self.assertEqual(results.name, "name")
        self.assertEqual(results.get_metadata(), {'key': 'value'})
        self.assertEqual(str(results), 'name: 1.50 sec +- 0.50 sec')
        self.assertEqual(results.format(0), '1.50 sec +- 0.50 sec')
        self.assertEqual(results.format(1), '1.50 sec +- 0.50 sec '
                         '(3 runs x 1 sample)')
Code example #29: running a list of testable classes through a Benchmark and printing the report as text.
def main():
    testables = (
        Dummy,
        PyListAppend,
        PyListPrepend,
        PyDeque,
        Logger,
        AppendToFile,
        AppendToOpenedFile,
        SQliteBufferedList,
        SQliteBufferedReverseList,
        SQliteBufferedDeque,
        SQlitePeriodicCommit,
        SQliteSingleTransaction,
        # SQlite,
        SQliteRamSingleTransaction,
        SQliteRam)
    print(perf.Benchmark(testables, 300000).as_text())
Code example #30: the convert --remove-warmups command strips warmup values from a dumped benchmark.
File: test_perf_cli.py (project: yunstanford/perf)
    def test_remove_warmups(self):
        values = [1.0, 2.0, 3.0]
        raw_values = [5.0] + values
        run = perf.Run(values, warmups=[(1, 5.0)], metadata={'name': 'bench'})
        bench = perf.Benchmark([run])

        self.assertEqual(bench._get_nwarmup(), 1)
        self.assertEqual(bench._get_raw_values(warmups=True), raw_values)

        with tests.temporary_directory() as tmpdir:
            filename = os.path.join(tmpdir, 'test.json')
            bench.dump(filename)

            stdout = self.run_command('convert', filename, '--remove-warmups',
                                      '--stdout')
            bench2 = perf.Benchmark.loads(stdout)

        self.assertEqual(bench2._get_nwarmup(), 0)
        self.assertEqual(bench2._get_raw_values(warmups=True), raw_values[1:])
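Taken together, the snippets follow one basic pattern: wrap measured values in perf.Run objects, aggregate them into a perf.Benchmark (or a perf.BenchmarkSuite), then query statistics or serialize the result. The short sketch below restates that pattern in isolation; it is not taken from any of the listed projects and uses only calls that already appear in the examples above.

# Minimal sketch of the common pattern, assuming the perf API shown in the
# snippets above (perf.Run, perf.Benchmark, dump).
import perf

# A run holds measured values plus metadata; the snippets always set 'name'.
run = perf.Run([1.0, 1.5, 2.0],
               metadata={'name': 'demo'},
               collect_metadata=False)

# A Benchmark aggregates one or more runs and exposes statistics.
bench = perf.Benchmark([run])
print(bench.mean())    # 1.5
print(bench.stdev())   # 0.5

# Serialize to a JSON file, as in examples #11 and #30.
bench.dump('demo.json')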