Example No. 1
    def test_get_date(self):
        date = datetime.datetime.now().isoformat(' ')
        run = perf.Run([1.0], metadata={'date': date}, collect_metadata=False)
        self.assertEqual(run._get_date(), date)

        run = perf.Run([1.0], collect_metadata=False)
        self.assertIsNone(run._get_date())
Example No. 2
    def test_number_types(self):
        # ensure that all types of numbers are accepted
        for number_type in NUMBER_TYPES:
            run = perf.Run([number_type(1)], collect_metadata=False)
            self.assertIsInstance(run.values[0], number_type)

            run = perf.Run([5], warmups=[(4, number_type(3))],
                           collect_metadata=False)
            self.assertEqual(run.warmups, ((4, 3),))
            self.assertIsInstance(run.warmups[0][1], number_type)
Example No. 3
    def test_constructor(self):
        # need at least one value or one warmup value
        with self.assertRaises(ValueError):
            perf.Run([], collect_metadata=False)
        perf.Run([1.0], collect_metadata=False)
        perf.Run([], warmups=[(4, 1.0)], collect_metadata=False)

        # number of loops
        with self.assertRaises(ValueError):
            perf.Run([1.0], metadata={'loops': -1}, collect_metadata=False)
        with self.assertRaises(ValueError):
            perf.Run([1.0], metadata={'inner_loops': 0}, collect_metadata=False)

        # loops type error
        with self.assertRaises(ValueError):
            perf.Run([1.0], metadata={'loops': 1.0}, collect_metadata=False)
        with self.assertRaises(ValueError):
            perf.Run([1.0], metadata={'inner_loops': 1.0}, collect_metadata=False)

        # metadata value must not be an empty string
        with self.assertRaises(ValueError):
            perf.Run([1.0], metadata={'name': ''}, collect_metadata=False)
        run = perf.Run([1.0], metadata={'load_avg_1min': 0.0},
                       collect_metadata=False)
        self.assertEqual(run.get_metadata()['load_avg_1min'], 0.0)
Example No. 4
    def test_extract_metadata(self):
        warmups = ((1, 5.0),)
        runs = [perf.Run((1.0,), warmups=warmups,
                         metadata={'name': 'bench', 'mem_usage': 5},
                         collect_metadata=False),
                perf.Run((2.0,), warmups=warmups,
                         metadata={'name': 'bench', 'mem_usage': 13},
                         collect_metadata=False)]
        bench = perf.Benchmark(runs)

        bench._extract_metadata('mem_usage')
        self.assertEqual(bench.get_values(), (5, 13))
        for run in bench.get_runs():
            self.assertEqual(run.warmups, ())
Example No. 5
    def test_add_runs(self):
        # bench 1
        values = (1.0, 2.0, 3.0)
        run = perf.Run(values, metadata={'name': "bench"})
        bench = perf.Benchmark([run])
        suite = perf.BenchmarkSuite([bench])

        # bench 2
        values2 = (4.0, 5.0, 6.0)
        run = perf.Run(values2, metadata={'name': "bench"})
        bench2 = perf.Benchmark([run])
        suite.add_runs(bench2)

        bench = suite.get_benchmark('bench')
        self.assertEqual(bench.get_values(), values + values2)
Example No. 6
    def run(self, runner, rounds):
        """ Run the test in two phases: first calibrate, then
            do the actual test. Be careful to keep the calibration
            timing low with respect to the test timing.

        """
        name = 'pybench.%s' % self.__class__.__name__
        loops = self.loops
        total_loops = loops * self.inner_loops

        warmups = []
        if self._calibrate_warmups:
            warmups.extend(self._calibrate_warmups)
        samples = []
        for i in range(rounds):
            dt = self.test(loops)
            dt /= total_loops
            if i < runner.args.warmups:
                warmups.append((loops, dt))
            else:
                samples.append(dt)

        metadata = {
            'name': name,
            'pybench_version': __version__,
            'loops': loops
        }
        if self.inner_loops != 1:
            metadata['inner_loops'] = self.inner_loops
        run = perf.Run(samples, warmups=warmups, metadata=metadata)
        self.bench.add_run(run)
Example No. 7
def cmd_collect_metadata(args):
    filename = args.output
    if filename and os.path.exists(filename):
        print("ERROR: The JSON file %r already exists" % filename)
        sys.exit(1)

    cpus = args.affinity
    if cpus:
        if not set_cpu_affinity(cpus):
            print("ERROR: failed to set the CPU affinity")
            sys.exit(1)
    else:
        cpus = get_isolated_cpus()
        if cpus:
            set_cpu_affinity(cpus)
            # ignore if set_cpu_affinity() failed

    run = perf.Run([1.0])
    metadata = run.get_metadata()
    if metadata:
        print("Metadata:")
        for line in format_metadata(metadata):
            print(line)

    if filename:
        run = run._update_metadata({'name': 'metadata'})
        bench = perf.Benchmark([run])
        bench.dump(filename)
Example No. 8
def create_run(values=None, warmups=None, metadata=None):
    if values is None:
        values = (1.0, )
    if metadata is None:
        metadata = {'name': 'bench'}
    elif 'name' not in metadata:
        metadata['name'] = 'bench'
    return perf.Run(values, warmups, metadata=metadata, collect_metadata=False)
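A hypothetical usage of this helper (the calls and assertions below are illustrative, not taken from the original test suite):

# Illustrative only: exercise the create_run() helper defined above.
run = create_run()                                    # defaults: values=(1.0,), name='bench'
assert run.values == (1.0,)

run = create_run(values=(2.0, 3.0), metadata={'os': 'linux'})
assert run.get_metadata()['name'] == 'bench'          # the helper fills in the missing name
assert run.values == (2.0, 3.0)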
Example No. 9
    def test_format_result_calibration(self):
        run = perf.Run([], warmups=[(100, 1.0)],
                       metadata={'name': 'bench', 'loops': 100},
                       collect_metadata=False)
        bench = perf.Benchmark([run])
        self.assertEqual(cli.format_result_value(bench), '<calibration: 100 loops>')
        self.assertEqual(cli.format_result(bench), 'Calibration: 100 loops')
        self.assertRaises(ValueError, bench.median)
Example No. 10
    def test_get_unit(self):
        run = perf.Run((1.0,),
                       metadata={'name': 'bench', 'unit': 'byte'},
                       collect_metadata=False)
        bench = perf.Benchmark([run])
        self.assertEqual(bench.get_unit(), 'byte')
Example No. 11
    def create_run(self):
        start_time = monotonic_clock()
        self.compute()
        self.metadata['duration'] = monotonic_clock() - start_time

        return perf.Run(self.values,
                        warmups=self.warmups,
                        metadata=self.metadata,
                        collect_metadata=False)
Example No. 12
    def test_compare_to(self):
        def time_func(loops):
            return 1.0

        def abs_executable(python):
            return python

        run = perf.Run([1.5],
                       metadata={'name': 'name'},
                       collect_metadata=False)
        bench = perf.Benchmark([run])
        suite = perf.BenchmarkSuite([bench])

        with ExitStack() as cm:

            def popen(*args, **kw):
                mock_popen = mock.Mock()
                mock_popen.wait.return_value = 0
                return mock_popen

            mock_subprocess = cm.enter_context(
                mock.patch('perf._runner.subprocess'))
            mock_subprocess.Popen.side_effect = popen

            cm.enter_context(
                mock.patch('perf._runner.abs_executable',
                           side_effect=abs_executable))
            cm.enter_context(
                mock.patch('perf._runner._load_suite_from_pipe',
                           return_value=suite))

            runner = perf.Runner()

            args = [
                "--python=python1", "--compare-to=python2", "--min-time=5",
                "-p1", "-w3", "-n7", "-l11"
            ]
            runner.parse_args(args)
            with tests.capture_stdout():
                runner.bench_time_func('name', time_func)

            def popen_call(python):
                args = [
                    python, mock.ANY, '--worker', '--pipe', mock.ANY,
                    '--worker-task=0', '--values', '7', '--warmups', '3',
                    '--loops', '11', '--min-time', '5.0'
                ]
                kw = {}
                if MS_WINDOWS:
                    kw['close_fds'] = False
                elif six.PY3:
                    kw['pass_fds'] = mock.ANY
                return mock.call(args, env=mock.ANY, **kw)

            call1 = popen_call('python2')
            call2 = popen_call('python1')
            mock_subprocess.Popen.assert_has_calls([call1, call2])
Example No. 13
    def create_bench(self, values, metadata=None):
        if metadata is None:
            metadata = {'name': 'bench'}
        elif 'name' not in metadata:
            metadata['name'] = 'bench'
        runs = []
        for value in values:
            run = perf.Run([value], metadata=metadata, collect_metadata=False)
            runs.append(run)
        return perf.Benchmark(runs)
Example No. 14
    def test_attr(self):
        run = perf.Run((2.0, 3.0),
                       warmups=((4, 0.5),),
                       metadata={'loops': 2, 'inner_loops': 5},
                       collect_metadata=False)
        self.assertEqual(run.get_loops(), 2)
        self.assertEqual(run.get_inner_loops(), 5)
        self.assertEqual(run.get_total_loops(), 2 * 5)
        self.assertEqual(run.values,
                         (2.0, 3.0))
        self.assertEqual(run._get_raw_values(),
                         [20.0, 30.0])
        self.assertEqual(run._get_raw_values(warmups=True),
                         [10.0, 20.0, 30.0])

        run = perf.Run((2.0, 3.0), warmups=((1, 1.0),))
        self.assertEqual(run.get_loops(), 1)
        self.assertEqual(run.get_inner_loops(), 1)
        self.assertEqual(run.get_total_loops(), 1)
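The raw-value assertions above follow from the relationship used throughout these tests: a stored value is a per-iteration timing, so its raw counterpart is value * loops * inner_loops, and for a warmup the tuple's own loop count takes the place of loops. A standalone sketch of that arithmetic (plain Python, no perf calls):

# Recompute the raw values asserted in the test above.
loops, inner_loops = 2, 5
values = (2.0, 3.0)
warmups = ((4, 0.5),)             # (warmup loops, per-iteration timing)

raw_values = [value * loops * inner_loops for value in values]
raw_warmups = [value * wloops * inner_loops for wloops, value in warmups]

print(raw_values)                 # [20.0, 30.0]
print(raw_warmups + raw_values)   # [10.0, 20.0, 30.0]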
Example No. 15
    def test_format_result(self):
        run = perf.Run([1.0, 1.5, 2.0],
                       warmups=[(1, 3.0)],
                       metadata={'name': 'mybench'},
                       collect_metadata=False)
        bench = perf.Benchmark([run])
        self.assertEqual(cli.format_result_value(bench),
                         '1.50 sec +- 0.50 sec')
        self.assertEqual(cli.format_result(bench),
                         'Mean +- std dev: 1.50 sec +- 0.50 sec')
Example No. 16
    def test_remove_all_metadata(self):
        run = perf.Run((1.0,),
                       metadata={'name': 'bench', 'os': 'win', 'unit': 'byte'},
                       collect_metadata=False)
        bench = perf.Benchmark([run])
        self.assertEqual(bench.get_metadata(),
                         {'name': 'bench', 'os': 'win', 'unit': 'byte'})

        bench._remove_all_metadata()
        self.assertEqual(bench.get_metadata(),
                         {'name': 'bench', 'unit': 'byte'})
Example No. 17
    def test_get_metadata(self):
        benchmarks = []
        for name in ('a', 'b'):
            run = perf.Run([1.0],
                           metadata={'name': name, 'os': 'linux'},
                           collect_metadata=False)
            bench = perf.Benchmark([run])
            benchmarks.append(bench)

        suite = perf.BenchmarkSuite(benchmarks)
        self.assertEqual(suite.get_metadata(),
                         {'os': 'linux'})
Example No. 18
    def test_update_metadata(self):
        runs = []
        for value in (1.0, 2.0, 3.0):
            runs.append(perf.Run((value,),
                                 metadata={'name': 'bench'},
                                 collect_metadata=False))
        bench = perf.Benchmark(runs)
        self.assertEqual(bench.get_metadata(),
                         {'name': 'bench'})

        bench.update_metadata({'os': 'linux'})
        self.assertEqual(bench.get_metadata(),
                         {'os': 'linux', 'name': 'bench'})
Example No. 19
    def test_remove_warmups(self):
        values = [1.0, 2.0, 3.0]
        raw_values = [5.0] + values
        run = perf.Run(values, warmups=[(1, 5.0)], metadata={'name': 'bench'})
        bench = perf.Benchmark([run])

        self.assertEqual(bench._get_nwarmup(), 1)
        self.assertEqual(bench._get_raw_values(warmups=True), raw_values)

        with tests.temporary_directory() as tmpdir:
            filename = os.path.join(tmpdir, 'test.json')
            bench.dump(filename)

            stdout = self.run_command('convert', filename, '--remove-warmups',
                                      '--stdout')
            bench2 = perf.Benchmark.loads(stdout)

        self.assertEqual(bench2._get_nwarmup(), 0)
        self.assertEqual(bench2._get_raw_values(warmups=True), raw_values[1:])
Example No. 20
    def test_benchmark(self):
        values = (1.0, 1.5, 2.0)
        raw_values = tuple(value * 3 * 20 for value in values)
        runs = []
        for value in values:
            run = perf.Run([value],
                           warmups=[(1, 3.0)],
                           metadata={
                               'key': 'value',
                               'loops': 20,
                               'inner_loops': 3,
                               'name': 'mybench'
                           },
                           collect_metadata=False)
            runs.append(run)
        bench = perf.Benchmark(runs)

        self.assertEqual(bench.get_values(), values)
        self.assertEqual(bench.get_unit(), 'second')
        self.assertEqual(bench._get_raw_values(), list(raw_values))
        self.assertEqual(bench.get_nrun(), 3)

        runs = bench.get_runs()
        self.assertIsInstance(runs, list)
        self.assertEqual(len(runs), 3)
        for run in runs:
            self.assertIsInstance(run, perf.Run)
            self.assertEqual(len(run._get_raw_values(True)), 2)
            self.assertEqual(run._get_loops(), 20)
            self.assertEqual(run._get_inner_loops(), 3)

        self.check_runs(bench, [(1, 3.0)], values)

        self.assertEqual(bench.get_name(), "mybench")
        self.assertEqual(bench.get_metadata(), {
            'key': 'value',
            'name': 'mybench',
            'loops': 20,
            'inner_loops': 3
        })
        self.assertEqual(repr(bench), "<Benchmark 'mybench' with 3 runs>")
Example No. 21
    def benchmark(self, name):
        run = perf.Run([1.0, 1.5, 2.0],
                       metadata={'name': name},
                       collect_metadata=False)
        return perf.Benchmark([run])
Example No. 22
"""
import os
import platform
import sys
import scipy
import numpy
import astropy
import perf

import MulensModel

print("System: " + os.name + " " + platform.system() + " " +
      platform.release())
print("Python: " + sys.version)
print("scipy: " + scipy.__version__)
print("numpy: " + numpy.__version__)
print("astropy: " + astropy.__version__)
print("perf: " + perf.__version__)
print("MulensModel: " + MulensModel.__version__)
print("")

# Values below don't matter:
meta = perf.Run([1.0, 1.5, 2.0], warmups=[(1, 3.0)]).get_metadata()

keys = [
    'date', 'platform', 'hostname', 'cpu_model_name', 'cpu_count',
    'python_version'
]
for key in keys:
    print(key, meta[key])
Example No. 23
    def test_name(self):
        # the 'name' metadata must not be empty or whitespace-only
        with self.assertRaises(ValueError):
            perf.Run([1.0], metadata={'name': '   '})
Example No. 24
    def test_name(self):
        # a run without 'name' metadata is rejected by Benchmark
        run = perf.Run([1.0])
        with self.assertRaises(ValueError):
            perf.Benchmark([run])
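For contrast, a minimal sketch following the pattern of the other examples here: once the run carries a 'name' metadata entry, perf.Benchmark accepts it.

# A run with 'name' metadata can be wrapped in a Benchmark.
run = perf.Run([1.0], metadata={'name': 'bench'}, collect_metadata=False)
bench = perf.Benchmark([run])
assert bench.get_name() == 'bench'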