Example #1
def run_benchmarks(bench_funcs, should_run, cmd_prefix, options):
    suite = perf.BenchmarkSuite()
    to_run = list(sorted(should_run))
    run_count = str(len(to_run))
    for index, name in enumerate(to_run):
        func = bench_funcs[name]
        print("[%s/%s] %s..." %
              (str(index + 1).rjust(len(run_count)), run_count, name))
        sys.stdout.flush()

        def add_bench(dest_suite, obj):
            if isinstance(obj, perf.BenchmarkSuite):
                benchmarks = obj
            else:
                benchmarks = (obj, )

            version = performance.__version__
            for bench in benchmarks:
                bench.update_metadata({'performance_version': version})
                dest_suite.add_benchmark(bench)

        try:
            bench = func(cmd_prefix, options)
        except Exception as exc:
            print("ERROR: Benchmark %s failed: %s" % (name, exc))
            sys.exit(1)

        add_bench(suite, bench)

    print()

    return suite
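A minimal usage sketch, not taken from the project itself: it assumes a bench_funcs mapping and an argparse-style options object like the ones passed into run_benchmarks above, and shows how the returned suite might be saved and inspected.

# Hypothetical caller: bench_funcs maps names to callables, `options` is an
# argparse-style namespace; both are placeholders for this sketch.
suite = run_benchmarks(bench_funcs, {'go', 'telco'}, [sys.executable], options)

suite.dump('results.json')               # serialize the whole suite to JSON
for name in suite.get_benchmark_names():
    print(name, suite.get_benchmark(name).get_values())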
Example #2
def cmd_run(parser, options, bench_funcs, bench_groups):
    print("Python benchmark suite %s" % performance.__version__)
    print()

    base = sys.executable

    # Get the full path since child processes are run in an empty environment
    # without the PATH variable
    base = which(base)

    if options.output:
        check_existing(options.output)

    options.base_binary = base

    if not options.control_label:
        options.control_label = options.base_binary

    base_args = options.args.split()
    base_cmd_prefix = [base] + base_args

    logging.basicConfig(level=logging.INFO)

    should_run = ParseBenchmarksOption(options.benchmarks, bench_groups,
                                       options.fast or options.debug_single_sample)

    should_run = FilterBenchmarks(should_run, bench_funcs, base_cmd_prefix)

    base_suite = perf.BenchmarkSuite()
    to_run = list(sorted(should_run))
    run_count = str(len(to_run))
    for index, name in enumerate(to_run):
        func = bench_funcs[name]
        print("[%s/%s] %s..." %
              (str(index+1).rjust(len(run_count)), run_count, name))
        options.benchmark_name = name  # Easier than threading this everywhere.

        def add_bench(dest_suite, bench):
            if isinstance(bench, perf.BenchmarkSuite):
                benchmarks = bench.get_benchmarks()
                for bench in benchmarks:
                    dest_suite.add_benchmark(bench)
            else:
                dest_suite.add_benchmark(bench)

        bench = func(base_cmd_prefix, options)
        add_bench(base_suite, bench)

    print()
    print("Report on %s" % " ".join(platform.uname()))
    if multiprocessing:
        print("Total CPU cores:", multiprocessing.cpu_count())

    if options.output:
        base_suite.dump(options.output)

    if options.append:
        perf.add_runs(options.append, base_suite)

    display_suite(base_suite)
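A two-line sketch, not part of the function above, to contrast the two output paths at the end: dump() writes the suite to a fresh file (check_existing() earlier guards against clobbering one that already exists), while perf.add_runs() merges the new runs into an existing results file.

base_suite.dump('new_results.json')        # write the suite to a fresh JSON file
perf.add_runs('history.json', base_suite)  # merge these runs into an existing file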
Example #3
    def test_compare_to(self):
        def time_func(loops):
            return 1.0

        def abs_executable(python):
            return python

        run = perf.Run([1.5],
                       metadata={'name': 'name'},
                       collect_metadata=False)
        bench = perf.Benchmark([run])
        suite = perf.BenchmarkSuite([bench])

        with ExitStack() as cm:

            def popen(*args, **kw):
                mock_popen = mock.Mock()
                mock_popen.wait.return_value = 0
                return mock_popen

            mock_subprocess = cm.enter_context(
                mock.patch('perf._runner.subprocess'))
            mock_subprocess.Popen.side_effect = popen

            cm.enter_context(
                mock.patch('perf._runner.abs_executable',
                           side_effect=abs_executable))
            cm.enter_context(
                mock.patch('perf._runner._load_suite_from_pipe',
                           return_value=suite))

            runner = perf.Runner()

            args = [
                "--python=python1", "--compare-to=python2", "--min-time=5",
                "-p1", "-w3", "-n7", "-l11"
            ]
            runner.parse_args(args)
            with tests.capture_stdout():
                runner.bench_time_func('name', time_func)

            def popen_call(python):
                args = [
                    python, mock.ANY, '--worker', '--pipe', mock.ANY,
                    '--worker-task=0', '--values', '7', '--warmups', '3',
                    '--loops', '11', '--min-time', '5.0'
                ]
                kw = {}
                if MS_WINDOWS:
                    kw['close_fds'] = False
                elif six.PY3:
                    kw['pass_fds'] = mock.ANY
                return mock.call(args, env=mock.ANY, **kw)

            call1 = popen_call('python2')
            call2 = popen_call('python1')
            mock_subprocess.Popen.assert_has_calls([call1, call2])
Example #4
    def test_get_total_duration(self):
        run = create_run([1.0])
        bench = perf.Benchmark([run])
        suite = perf.BenchmarkSuite([bench])

        run = create_run([2.0])
        bench = perf.Benchmark([run])
        suite.add_runs(bench)

        self.assertEqual(suite.get_total_duration(), 3.0)
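A rough sketch of the same arithmetic without the create_run() helper, built only from constructors that appear elsewhere on this page; the total is expected to be the sum of the per-run durations, here derived from the values (1.0 + 2.0).

run_a = perf.Run([1.0], metadata={'name': 'a'}, collect_metadata=False)
run_b = perf.Run([2.0], metadata={'name': 'b'}, collect_metadata=False)
suite = perf.BenchmarkSuite([perf.Benchmark([run_a]), perf.Benchmark([run_b])])
print(suite.get_total_duration())  # expected: 3.0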
Example #5
    def test_suite(self):
        telco = self.benchmark('telco')
        go = self.benchmark('go')
        suite = perf.BenchmarkSuite([telco, go])

        self.assertIsNone(suite.filename)
        self.assertEqual(len(suite), 2)
        self.assertEqual(suite.get_benchmarks(), [telco, go])
        self.assertEqual(suite.get_benchmark('go'), go)
        with self.assertRaises(KeyError):
            suite.get_benchmark('non_existent')
Example #6
    def test_get_metadata(self):
        benchmarks = []
        for name in ('a', 'b'):
            run = perf.Run([1.0],
                           metadata={'name': name, 'os': 'linux'},
                           collect_metadata=False)
            bench = perf.Benchmark([run])
            benchmarks.append(bench)

        suite = perf.BenchmarkSuite(benchmarks)
        self.assertEqual(suite.get_metadata(),
                         {'os': 'linux'})
Example #7
    def create_suite(self):
        bench1 = self.create_bench((1.0, 1.5, 2.0),
                                   metadata={
                                       'hostname': 'toto',
                                       'python_version': '2.7',
                                       'name': 'py2'
                                   })
        bench2 = self.create_bench((1.5, 2.0, 2.5),
                                   metadata={
                                       'hostname': 'toto',
                                       'python_version': '3.4',
                                       'name': 'py3'
                                   })
        return perf.BenchmarkSuite([bench1, bench2])
Example #8
    def test_add_runs(self):
        # bench 1
        values = (1.0, 2.0, 3.0)
        run = perf.Run(values, metadata={'name': "bench"})
        bench = perf.Benchmark([run])
        suite = perf.BenchmarkSuite([bench])

        # bench 2
        values2 = (4.0, 5.0, 6.0)
        run = perf.Run(values2, metadata={'name': "bench"})
        bench2 = perf.Benchmark([run])
        suite.add_runs(bench2)

        bench = suite.get_benchmark('bench')
        self.assertEqual(bench.get_values(), values + values2)
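A possible follow-up, sketched rather than taken from the test: once the runs are merged under the same benchmark name, the combined suite round-trips through dump() and loads(), which other examples on this page also use.

import os
import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, 'merged.json')
    suite.dump(path)                                  # serialize the merged suite
    with open(path) as fp:
        restored = perf.BenchmarkSuite.loads(fp.read())
    # expected to keep all six values: (1.0, 2.0, 3.0, 4.0, 5.0, 6.0)
    print(restored.get_benchmark('bench').get_values())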
Example #9
    def _main(self):
        start_time = perf.monotonic_clock()

        self.parse_args()

        self._cpu_affinity()

        suite = perf.BenchmarkSuite()
        try:
            self._spawn_workers(suite, start_time)
        except KeyboardInterrupt:
            print("Interrupted: exit", file=sys.stderr)
            sys.exit(1)

        return suite
Example #10
    def __init__(self, runner, name, verbose=None):

        self.name = '%04i-%02i-%02i %02i:%02i:%02i' % \
                    (time.localtime(time.time())[:6])
        if verbose is not None:
            self.verbose = verbose

        # Init vars
        self.tests = {}
        if _debug:
            print('Getting machine details...')
        self.machine_details = get_machine_details()

        self.suite = perf.BenchmarkSuite()
        self.runner = runner
Example #11
        def add_bench(dest_suite, obj):
            if isinstance(obj, perf.BenchmarkSuite):
                benchmarks = obj
            else:
                benchmarks = (obj, )

            version = performance.__version__
            for bench in benchmarks:
                bench.update_metadata({'performance_version': version})

                if dest_suite is not None:
                    dest_suite.add_benchmark(bench)
                else:
                    dest_suite = perf.BenchmarkSuite([bench])

            return dest_suite
Example #12
    def test_get_dates(self):
        run = create_run(metadata={'date': '2016-07-20T14:06:00',
                                   'duration': 60.0,
                                   'name': 'bench1'})
        bench = perf.Benchmark([run])
        suite = perf.BenchmarkSuite([bench])
        self.assertEqual(suite.get_dates(),
                         (datetime.datetime(2016, 7, 20, 14, 6, 0),
                          datetime.datetime(2016, 7, 20, 14, 7, 0)))

        run = create_run(metadata={'date': '2016-07-20T14:10:00',
                                   'duration': 60.0,
                                   'name': 'bench2'})
        bench = perf.Benchmark([run])
        suite.add_benchmark(bench)
        self.assertEqual(suite.get_dates(),
                         (datetime.datetime(2016, 7, 20, 14, 6, 0),
                          datetime.datetime(2016, 7, 20, 14, 11, 0)))
Example #13
    def test_filter_benchmarks(self):
        values = (1.0, 1.5, 2.0)
        benchmarks = []
        for name in ("call_simple", "go", "telco"):
            bench = self.create_bench(values, metadata={'name': name})
            benchmarks.append(bench)
        suite = perf.BenchmarkSuite(benchmarks)

        with tests.temporary_directory() as tmpdir:
            filename = os.path.join(tmpdir, 'test.json')
            suite.dump(filename)

            stdout = self.run_command('convert', filename,
                                      '--include-benchmark', 'go', '--stdout')
            suite2 = perf.BenchmarkSuite.loads(stdout)

            stdout = self.run_command('convert', filename,
                                      '--exclude-benchmark', 'go', '--stdout')
            suite3 = perf.BenchmarkSuite.loads(stdout)

        self.assertEqual(suite2.get_benchmark_names(), ['go'])

        self.assertEqual(suite3.get_benchmark_names(),
                         ['call_simple', 'telco'])
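The same filtering can be approximated in Python instead of going through the convert command; a sketch assuming Benchmark.get_name() is available (the suite-level get_benchmark_names() used above suggests per-benchmark names are exposed).

kept = [bench for bench in suite.get_benchmarks() if bench.get_name() != 'go']
filtered = perf.BenchmarkSuite(kept)
print(filtered.get_benchmark_names())  # expected: ['call_simple', 'telco']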
Example #14
    def create_dummy_suite(self):
        telco = self.benchmark('telco')
        go = self.benchmark('go')
        return perf.BenchmarkSuite([telco, go])