Code Example #1
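    # Dump a three-benchmark suite to JSON, then filter it with the
    # 'convert' command's --include-benchmark / --exclude-benchmark options.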
    def test_filter_benchmarks(self):
        values = (1.0, 1.5, 2.0)
        benchmarks = []
        for name in ("call_simple", "go", "telco"):
            bench = self.create_bench(values, metadata={'name': name})
            benchmarks.append(bench)
        suite = pyperf.BenchmarkSuite(benchmarks)

        with tests.temporary_directory() as tmpdir:
            filename = os.path.join(tmpdir, 'test.json')
            suite.dump(filename)

            stdout = self.run_command('convert', filename,
                                      '--include-benchmark', 'go', '--stdout')
            suite2 = pyperf.BenchmarkSuite.loads(stdout)

            stdout = self.run_command('convert', filename,
                                      '--exclude-benchmark', 'go', '--stdout')
            suite3 = pyperf.BenchmarkSuite.loads(stdout)

        self.assertEqual(suite2.get_benchmark_names(),
                         ['go'])

        self.assertEqual(suite3.get_benchmark_names(),
                         ['call_simple', 'telco'])
Code Example #2
File: micro.py  Project: thejager/Benchmark
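# For each configured benchmark: start the app, record the benchmark's
# name and description in bm_info.ini, run the pyperf script, and collect
# the result; return everything as a single BenchmarkSuite.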
def run_perf_script(level):
    cmd = utils.build_command('micro_script.py')

    benchmarks = []
    for b in config_micro.benchmarks:
        if config_micro.clear_db:
            utils.drop_tables(config_micro.db_url)
        server_pid = utils.start_app(level,
                                     config_micro.webserver,
                                     config_micro.port,
                                     config_micro.url,
                                     'micro.app:app',
                                     output=config_micro.output)
        time.sleep(config_micro.bm_cooldown)
        benchmark_file = configparser.ConfigParser()
        benchmark_file['bench'] = {'name': b[0], 'desc': b[1]}
        with open('bm_info.ini', 'w') as configfile:
            benchmark_file.write(configfile)

        with temporary_file() as tmp:
            # Build the command per iteration so '--output' is not
            # appended to cmd repeatedly across benchmarks.
            run_command(cmd + ['--output', tmp])
            benchmarks.append(pyperf.Benchmark.load(tmp))

        utils.stop_app(server_pid)
        time.sleep(config_micro.bm_cooldown)

    return pyperf.BenchmarkSuite(benchmarks)
Code Example #3
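    # get_total_duration() sums the duration of every run across all
    # benchmarks in the suite.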
    def test_get_total_duration(self):
        run = create_run([1.0])
        bench = pyperf.Benchmark([run])
        suite = pyperf.BenchmarkSuite([bench])

        run = create_run([2.0])
        bench = pyperf.Benchmark([run])
        suite.add_runs(bench)

        self.assertEqual(suite.get_total_duration(), 3.0)
Code Example #4
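    # Build a two-benchmark fixture suite with explicit per-benchmark
    # metadata (hostname, python_version, name).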
    def create_suite(self):
        bench1 = self.create_bench((1.0, 1.5, 2.0),
                                   metadata={'hostname': 'toto',
                                             'python_version': '2.7',
                                             'name': 'py36'})
        bench2 = self.create_bench((1.5, 2.0, 2.5),
                                   metadata={'hostname': 'toto',
                                             'python_version': '3.4',
                                             'name': 'py38'})
        return pyperf.BenchmarkSuite([bench1, bench2])
Code Example #5
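    # Patch subprocess.Popen and the worker-pipe loader so --compare-to
    # runs without spawning real workers, then verify that one worker is
    # launched for each of the two Python executables.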
    def test_compare_to(self):
        def time_func(loops):
            return 1.0

        def abs_executable(python):
            return python

        run = pyperf.Run([1.5],
                         metadata={'name': 'name'},
                         collect_metadata=False)
        bench = pyperf.Benchmark([run])
        suite = pyperf.BenchmarkSuite([bench])

        with ExitStack() as cm:

            def popen(*args, **kw):
                mock_popen = mock.Mock()
                mock_popen.wait.return_value = 0
                return mock_popen

            mock_subprocess = cm.enter_context(
                mock.patch('pyperf._master.subprocess'))
            mock_subprocess.Popen.side_effect = popen

            cm.enter_context(
                mock.patch('pyperf._runner.abs_executable',
                           side_effect=abs_executable))
            cm.enter_context(
                mock.patch('pyperf._master._load_suite_from_pipe',
                           return_value=suite))

            args = [
                "--python=python3.8", "--compare-to=python3.6", "--min-time=5",
                "-p1", "-w3", "-n7", "-l11"
            ]
            runner = self.create_runner(args)
            with tests.capture_stdout():
                runner.bench_time_func('name', time_func)

            def popen_call(python):
                args = [
                    python, mock.ANY, '--worker', '--pipe', mock.ANY,
                    '--worker-task=0', '--values', '7', '--min-time', '5.0',
                    '--loops', '11', '--warmups', '3'
                ]
                kw = {}
                if MS_WINDOWS:
                    kw['close_fds'] = False
                else:
                    kw['pass_fds'] = mock.ANY
                return mock.call(args, env=mock.ANY, **kw)

            call1 = popen_call('python3.6')
            call2 = popen_call('python3.8')
            mock_subprocess.Popen.assert_has_calls([call1, call2])
Code Example #6
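    # Basic suite accessors: len(), get_benchmarks(), get_benchmark(),
    # and KeyError for an unknown name.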
    def test_suite(self):
        telco = self.benchmark('telco')
        go = self.benchmark('go')
        suite = pyperf.BenchmarkSuite([telco, go])

        self.assertIsNone(suite.filename)
        self.assertEqual(len(suite), 2)
        self.assertEqual(suite.get_benchmarks(), [telco, go])
        self.assertEqual(suite.get_benchmark('go'), go)
        with self.assertRaises(KeyError):
            suite.get_benchmark('non_existent')
Code Example #7
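    # get_metadata() returns the metadata shared by every benchmark in the
    # suite; here both benchmarks share 'os': 'linux'.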
    def test_get_metadata(self):
        benchmarks = []
        for name in ('a', 'b'):
            run = pyperf.Run([1.0],
                             metadata={'name': name, 'os': 'linux'},
                             collect_metadata=False)
            bench = pyperf.Benchmark([run])
            benchmarks.append(bench)

        suite = pyperf.BenchmarkSuite(benchmarks)
        self.assertEqual(suite.get_metadata(),
                         {'os': 'linux'})
Code Example #8
File: run.py  Project: sourcery-ai-bot/pyperformance
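        # Stamp each benchmark with the pyperformance version and merge it
        # into dest_suite, creating the suite on first use.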
        def add_bench(dest_suite, obj):
            benchmarks = obj if isinstance(obj,
                                           pyperf.BenchmarkSuite) else (obj, )
            version = pyperformance.__version__
            for bench in benchmarks:
                bench.update_metadata({'performance_version': version})

                if dest_suite is not None:
                    dest_suite.add_benchmark(bench)
                else:
                    dest_suite = pyperf.BenchmarkSuite([bench])

            return dest_suite
Code Example #9
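    # add_runs() merges the runs of a same-named benchmark into the
    # existing one, so its values are concatenated.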
    def test_add_runs(self):
        # bench 1
        values = (1.0, 2.0, 3.0)
        run = pyperf.Run(values, metadata={'name': "bench"})
        bench = pyperf.Benchmark([run])
        suite = pyperf.BenchmarkSuite([bench])

        # bench 2
        values2 = (4.0, 5.0, 6.0)
        run = pyperf.Run(values2, metadata={'name': "bench"})
        bench2 = pyperf.Benchmark([run])
        suite.add_runs(bench2)

        bench = suite.get_benchmark('bench')
        self.assertEqual(bench.get_values(), values + values2)
Code Example #10
File: run.py  Project: kmod/performance
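        # Same pattern as the previous example: normalize obj to a sequence
        # of benchmarks, stamp the version, and merge into dest_suite.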
        def add_bench(dest_suite, obj):
            if isinstance(obj, pyperf.BenchmarkSuite):
                results = obj
            else:
                results = (obj, )

            version = pyperformance.__version__
            for res in results:
                res.update_metadata({'performance_version': version})

                if dest_suite is not None:
                    dest_suite.add_benchmark(res)
                else:
                    dest_suite = pyperf.BenchmarkSuite([res])

            return dest_suite
Code Example #11
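    # get_dates() returns the (start, end) span covered by all runs;
    # adding a later benchmark extends the end date.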
    def test_get_dates(self):
        run = create_run(metadata={'date': '2016-07-20T14:06:00',
                                   'duration': 60.0,
                                   'name': 'bench1'})
        bench = pyperf.Benchmark([run])
        suite = pyperf.BenchmarkSuite([bench])
        self.assertEqual(suite.get_dates(),
                         (datetime.datetime(2016, 7, 20, 14, 6, 0),
                          datetime.datetime(2016, 7, 20, 14, 7, 0)))

        run = create_run(metadata={'date': '2016-07-20T14:10:00',
                                   'duration': 60.0,
                                   'name': 'bench2'})
        bench = pyperf.Benchmark([run])
        suite.add_benchmark(bench)
        self.assertEqual(suite.get_dates(),
                         (datetime.datetime(2016, 7, 20, 14, 6, 0),
                          datetime.datetime(2016, 7, 20, 14, 11, 0)))
Code Example #12
File: __init__.py  Project: ichard26/blackbench
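# Run each benchmark script in a subprocess, load its pyperf JSON result,
# attach version metadata, and collect the results into a BenchmarkSuite;
# return None when no benchmark produced a result.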
def run_suite(benchmarks: List[Benchmark], pyperf_args: Sequence[str],
              workdir: Path) -> Tuple[Optional[pyperf.BenchmarkSuite], bool]:
    import black

    results: List[pyperf.Benchmark] = []
    errored = False
    for i, bm in enumerate(benchmarks, start=1):
        bm_type = f"{'micro' if bm.micro else ''}benchmark"
        bm_count = f"({i}/{len(benchmarks)})"
        log(f"Running `{bm.name}` {bm_type} {bm_count}", bold=True)
        script = workdir / f"{i}.py"
        script.write_text(bm.code, encoding="utf8")
        result_file = workdir / f"{i}.json"

        cmd = [
            sys.executable,
            str(script), "--output",
            str(result_file), *pyperf_args
        ]
        t0 = time.perf_counter()
        try:
            subprocess.run(cmd, check=True)
        except subprocess.CalledProcessError:
            err("Failed to run benchmark ^^^")
            errored = True
        else:
            t1 = time.perf_counter()
            log(f"Took {round(t1 - t0, 3)} seconds.", bold=True)

            result = pyperf.Benchmark.loads(
                result_file.read_text(encoding="utf8"))
            # fmt: off
            result.update_metadata({
                "description": bm.description,
                "blackbench-version": __version__,
                "black-version": black.__version__
            })
            # fmt: on
            results.append(result)
    else:
        if results:
            return pyperf.BenchmarkSuite(results), errored
        else:
            return None, True
Code Example #13
File: benchmarks.py  Project: zhanghb-net/httpie
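    # Run every registered benchmark and wrap the results in a suite.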
    def run(self) -> pyperf.BenchmarkSuite:
        results = [benchmark.run(self) for benchmark in self.benchmarks]
        return pyperf.BenchmarkSuite(results)
Code Example #14
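    # Minimal two-benchmark suite used as a test fixture.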
    def create_dummy_suite(self):
        telco = self.benchmark('telco')
        go = self.benchmark('go')
        return pyperf.BenchmarkSuite([telco, go])