Example #1
def main():
    def worker_cmd(cmd, args):
        cmd.extend(args.benchmark)

    runner = perf.Runner(processes=5, values=3, add_cmdline_args=worker_cmd)

    all_names = 'dnspython', 'blocking', 'ares', 'thread'
    runner.argparser.add_argument('benchmark',
                                  nargs='*',
                                  default='all',
                                  choices=all_names + ('all', ))

    args = runner.parse_args()

    if 'all' in args.benchmark or args.benchmark == 'all':
        args.benchmark = ['all']
        names = all_names
    else:
        names = args.benchmark

    for name in names:
        runner.bench_func(name + ' sequential',
                          run_all,
                          name,
                          resolve_seq,
                          inner_loops=N)
        runner.bench_func(name + ' parallel',
                          run_all,
                          name,
                          resolve_par,
                          inner_loops=N)
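
A side note on `add_cmdline_args`: perf runs each benchmark in fresh worker
processes, and this hook is how the parent forwards custom CLI options (here,
the `benchmark` selection) to every worker it spawns. A minimal, self-contained
sketch of the mechanism, assuming the legacy `perf` module (published today as
`pyperf`) and a hypothetical `bench_noop` workload:

import perf

def add_args(cmd, args):
    # called in the parent: extend each worker's command line
    if args.fast:
        cmd.append('--fast')

def bench_noop(loops):
    # hypothetical time_func: run an empty loop `loops` times and
    # return the total elapsed time in seconds
    t0 = perf.perf_counter()
    for _ in range(loops):
        pass
    return perf.perf_counter() - t0

runner = perf.Runner(add_cmdline_args=add_args)
runner.argparser.add_argument('--fast', action='store_true')
runner.parse_args()
runner.bench_time_func('noop', bench_noop)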
Example #2
def main():
    runner = perf.Runner()

    runner.bench_time_func(
        "no_tracer",
        bench_no_trace,
        inner_loops=N
    )

    runner.bench_time_func(
        "trivial_tracer",
        bench_trivial_tracer,
        inner_loops=N
    )

    runner.bench_time_func(
        "monitor_tracer",
        bench_monitor_tracer,
        inner_loops=N
    )

    runner.bench_time_func(
        "max_switch_tracer",
        bench_max_switch_tracer,
        inner_loops=N
    )

    runner.bench_time_func(
        "hub_switch_tracer",
        bench_hub_switch_tracer,
        inner_loops=N
    )
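
The repeated registrations above can be collapsed into a data-driven loop, as
several later examples do (see Example #9); a sketch assuming the same
`bench_*` callables and `N` are in scope:

for name, func in [
        ("no_tracer", bench_no_trace),
        ("trivial_tracer", bench_trivial_tracer),
        ("monitor_tracer", bench_monitor_tracer),
        ("max_switch_tracer", bench_max_switch_tracer),
        ("hub_switch_tracer", bench_hub_switch_tracer)]:
    runner.bench_time_func(name, func, inner_loops=N)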
Example #3
    def exec_runner(self, *args, **kwargs):
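        # deterministic stand-in for perf.perf_counter(): each call
        # returns a clock that advances by exactly 1.0 second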
        def fake_timer():
            t = fake_timer.value
            fake_timer.value += 1.0
            return t

        fake_timer.value = 0.0

        name = kwargs.pop('name', 'bench')
        time_func = kwargs.pop('time_func', None)

        runner = perf.Runner(**kwargs)
        # disable CPU affinity to not pollute stdout
        runner._cpu_affinity = lambda: None
        runner.parse_args(args)

        with mock.patch('perf.perf_counter', fake_timer):
            with tests.capture_stdout() as stdout:
                with tests.capture_stderr() as stderr:
                    if time_func:
                        bench = runner.bench_time_func(name, time_func)
                    else:
                        bench = runner.bench_func(name, check_args, None, 1, 2)

        stdout = stdout.getvalue()
        stderr = stderr.getvalue()
        if '--stdout' not in args:
            self.assertEqual(stderr, '')

        # check bench_time_func() bench
        self.assertIsInstance(bench, perf.Benchmark)
        self.assertEqual(bench.get_name(), name)
        self.assertEqual(bench.get_nrun(), 1)

        return Result(runner, bench, stdout)
Example #4
    def test_compare_to(self):
        def time_func(loops):
            return 1.0

        def abs_executable(python):
            return python

        run = perf.Run([1.5],
                       metadata={'name': 'name'},
                       collect_metadata=False)
        bench = perf.Benchmark([run])
        suite = perf.BenchmarkSuite([bench])

        with ExitStack() as cm:

            def popen(*args, **kw):
                mock_popen = mock.Mock()
                mock_popen.wait.return_value = 0
                return mock_popen

            mock_subprocess = cm.enter_context(
                mock.patch('perf._runner.subprocess'))
            mock_subprocess.Popen.side_effect = popen

            cm.enter_context(
                mock.patch('perf._runner.abs_executable',
                           side_effect=abs_executable))
            cm.enter_context(
                mock.patch('perf._runner._load_suite_from_pipe',
                           return_value=suite))

            runner = perf.Runner()

            args = [
                "--python=python1", "--compare-to=python2", "--min-time=5",
                "-p1", "-w3", "-n7", "-l11"
            ]
            runner.parse_args(args)
            with tests.capture_stdout():
                runner.bench_time_func('name', time_func)

            def popen_call(python):
                args = [
                    python, mock.ANY, '--worker', '--pipe', mock.ANY,
                    '--worker-task=0', '--values', '7', '--warmups', '3',
                    '--loops', '11', '--min-time', '5.0'
                ]
                kw = {}
                if MS_WINDOWS:
                    kw['close_fds'] = False
                elif six.PY3:
                    kw['pass_fds'] = mock.ANY
                return mock.call(args, env=mock.ANY, **kw)

            call1 = popen_call('python2')
            call2 = popen_call('python1')
            mock_subprocess.Popen.assert_has_calls([call1, call2])
Example #5
    def create_runner(self, args, **kwargs):
        # hack to be able to create multiple instances per process
        perf.Runner._created.clear()

        runner = perf.Runner(**kwargs)
        # disable CPU affinity to not pollute stdout
        runner._cpu_affinity = lambda: None
        runner.parse_args(args)
        return runner
Example #6
    def test_bench_command(self):
        args = [sys.executable, '-c', 'pass']

        runner = perf.Runner()
        runner.parse_args('-l1 -w0 -n1 --worker'.split())
        with tests.capture_stdout():
            bench = runner.bench_command('bench', args)

        self.assertEqual(bench.get_metadata()['command'],
                         ' '.join(map(repr, args)))
Example #7
    def run(self, callable_testobj):
        print("-------------------------------------------------------------")
        print("About to start benchmarking for " + self.test_title)
        print("-------------------------------------------------------------")
        runner = perf.Runner()
        myb = runner.bench_func(self.test_title, callable_testobj)
        # myb.dump('testoutput.json', compact=False, replace=True)
        print("-------------------------------------------------------------")
        print("Finished benchmarking for " + self.test_title)
        print("-------------------------------------------------------------")
Example #8
def main():
    runner = perf.Runner()
    runner.metadata['description'] = "Solver for Meteor Puzzle board"

    board, cti, pieces = get_puzzle(WIDTH, HEIGHT)
    fps = get_footprints(board, cti, pieces)
    se_nh = get_senh(board, cti)

    solve_arg = SOLVE_ARG
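    # note: bench_sample_func() is the older perf spelling of what later
    # releases call bench_time_func()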
    runner.bench_sample_func('meteor_contest', bench_meteor_contest, board,
                             pieces, solve_arg, fps, se_nh)
Example #9
def main():
    runner = perf.Runner()
    for arg in (0, -1, 0.00001, 0.001):
        runner.bench_time_func('gevent sleep(%s)' % (arg, ),
                               bench_gevent,
                               arg,
                               inner_loops=N)
        runner.bench_time_func('eventlet sleep(%s)' % (arg, ),
                               bench_eventlet,
                               arg,
                               inner_loops=N)
Example #10
    def test_cpu_affinity_args(self):
        runner = perf.Runner()
        runner.parse_args(['-v', '--affinity=3,7'])

        with mock.patch('perf._runner.set_cpu_affinity') as mock_setaffinity:
            with tests.capture_stdout() as stdout:
                runner._cpu_affinity()

        self.assertEqual(runner.args.affinity, '3,7')
        self.assertEqual(stdout.getvalue(), 'Pin process to CPUs: 3,7\n')
        mock_setaffinity.assert_called_once_with([3, 7])
Example #11
    def test_cpu_affinity_no_isolcpus(self):
        runner = perf.Runner()
        runner.parse_args(['-v'])

        with mock.patch('perf._runner.set_cpu_affinity') as mock_setaffinity:
            with mock.patch('perf._runner.get_isolated_cpus',
                            return_value=None):
                runner._cpu_affinity()

        self.assertFalse(runner.args.affinity)
        self.assertEqual(mock_setaffinity.call_count, 0)
Example #12
def main():
    runner = perf.Runner()

    args = runner.parse_args()
    if not args.worker:
        print("%s methods involved on platform %r (psutil %s):" %
              (len(names), sys.platform, psutil.__version__))
        for name in sorted(names):
            print("    " + name)

    runner.bench_func("normal", call_normal)
    runner.bench_func("oneshot", call_oneshot)
Example #13
    def test_duplicated_named(self):
        def time_func(loops):
            return 1.0

        runner = perf.Runner()
        runner.parse_args('-l1 -w0 -n1 --worker'.split())
        with tests.capture_stdout():
            runner.bench_time_func('optim', time_func)
            with self.assertRaises(ValueError) as cm:
                runner.bench_time_func('optim', time_func)

        self.assertEqual(str(cm.exception),
                         "duplicated benchmark name: 'optim'")
Example #14
def main():
    runner = perf.Runner()
    runner.metadata['description'] = "Chameleon template"

    tmpl = PageTemplate(BIGTABLE_ZPT)
    table = [dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9, j=10)
             for x in range(500)]
    options = {'table': table}

    func = functools.partial(tmpl, options=options)

    # warm up the template before measuring
    for i in range(1, 200):
        func()

    runner.bench_func('chameleon', func)
Example #15
    def test_json_exists(self):
        with tempfile.NamedTemporaryFile('wb+') as tmp:

            runner = perf.Runner()
            with tests.capture_stdout() as stdout:
                try:
                    runner.parse_args(['--worker', '--output', tmp.name])
                except SystemExit as exc:
                    self.assertEqual(exc.code, 1)

            self.assertEqual(
                'ERROR: The JSON file %r already exists' % tmp.name,
                stdout.getvalue().rstrip())
Example #16
    def test_calibration_zero(self):
        runner = perf.Runner()
        # disable CPU affinity to not pollute stdout
        runner._cpu_affinity = lambda: None
        runner.parse_args(['--worker'])

        def time_func(loops):
            return 0

        with self.assertRaises(ValueError) as cm:
            runner.bench_time_func('bench', time_func)
        self.assertIn('error in calibration, loops is too big:',
                      str(cm.exception))
Example #17
    def test_cpu_affinity_isolcpus(self):
        runner = perf.Runner()
        runner.parse_args(['-v'])

        with mock.patch('perf._runner.set_cpu_affinity') as mock_setaffinity:
            with mock.patch('perf._runner.get_isolated_cpus',
                            return_value=[1, 2]):
                with tests.capture_stdout() as stdout:
                    runner._cpu_affinity()

        self.assertEqual(runner.args.affinity, '1-2')
        self.assertEqual(stdout.getvalue(),
                         'Pin process to isolated CPUs: 1-2\n')
        mock_setaffinity.assert_called_once_with([1, 2])
Example #18
def main():
    runner = perf.Runner()
    for func, name in (
        (get_memory_gevent14, 'gevent14-py'),
        (cy_get_memory, 'inst-cy'),
        (get_memory_inst, 'inst-py'),
        (get_memory_is, 'is-py'),
    ):
        for arg_name, arg in DATA.items():
            runner.bench_time_func('%s - %s' % (name, arg_name),
                                   test,
                                   func,
                                   arg,
                                   inner_loops=N)
Example #19
    def test_calibration(self):
        runner = perf.Runner()
        # disable CPU affinity to not pollute stdout
        runner._cpu_affinity = lambda: None
        runner.parse_args(['--worker', '-w2', '-n1', '--min-time=1.0'])

        # Simulate a PyPy-style JIT: running the same function becomes
        # faster after a couple of warmup values
        def time_func(loops):
            if loops < 16:
                return 0

            time_func.step += 1
            if time_func.step == 1:
                return 3.0
            elif time_func.step == 2:
                return 0.5
            else:
                return 1.0

        time_func.step = 0

        with tests.capture_stdout():
            bench = runner.bench_time_func('bench', time_func)

        runs = bench.get_runs()
        self.assertEqual(len(runs), 1)

        run = runs[0]
        self.assertEqual(
            run.warmups,
            # first calibration values are zero
            (
                (1, 0.0),
                (2, 0.0),
                (4, 0.0),
                (8, 0.0),

                # first non-zero calibration value
                (16, 3.0 / 16),

                # warmup 1: JIT triggered, timing drops 3.0 => 0.5
                # for loops=16
                (16, 0.5 / 16),
                # warmup 1, new try with loops x 2
                (32, 1.0 / 32),

                # warmup 2
                (32, 1.0 / 32)))
Example #20
def main():
    runner = perf.Runner()

    runner.bench_time_func('native socketpair sendall',
                           bench_native_thread_default_socketpair,
                           inner_loops=N)
    runner.bench_time_func('gevent socketpair sendall',
                           bench_gevent_greenlet_default_socketpair,
                           inner_loops=N)

    runner.bench_time_func('native udp sendto',
                           bench_native_udp,
                           inner_loops=N)
    runner.bench_time_func('gevent udp sendto',
                           bench_gevent_udp,
                           inner_loops=N)
Example #21
def main():
    if '--profile' in sys.argv:
        import cProfile
        import pstats
        import io
        pr = cProfile.Profile()
        pr.enable()
        for _ in range(2):
            bench_gevent_forked_socketpair(2)
        pr.disable()
        s = io.StringIO()
        sortby = 'cumulative'
        ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        ps.print_stats()
        print(s.getvalue())
        return
    runner = perf.Runner()

    runner.bench_time_func(
        'gevent socketpair sendall greenlet',
        bench_gevent_greenlet_default_socketpair,
        inner_loops=N)

    runner.bench_time_func(
        'native socketpair sendall thread',
        bench_native_thread_default_socketpair,
        inner_loops=N)

    runner.bench_time_func(
        'gevent socketpair sendall fork',
        bench_gevent_forked_socketpair,
        inner_loops=N)

    runner.bench_time_func(
        'native socketpair sendall fork',
        bench_native_forked_socketpair,
        inner_loops=N)

    runner.bench_time_func(
        'native udp sendto',
        bench_native_udp,
        inner_loops=N)
    runner.bench_time_func(
        'gevent udp sendto',
        bench_gevent_udp,
        inner_loops=N)
Example #22
    def test_time_func_zero(self):
        if perf.python_has_jit():
            # If Python has a JIT, perf forces calibration which is already
            # tested by test_calibration_zero()
            self.skipTest("Python has a JIT")

        runner = perf.Runner()
        # disable CPU affinity to not pollute stdout
        runner._cpu_affinity = lambda: None
        runner.parse_args(['--worker', '-l1'])

        def time_func(loops):
            return 0

        with self.assertRaises(ValueError) as cm:
            runner.bench_time_func('bench', time_func)
        self.assertEqual(str(cm.exception), 'benchmark function returned zero')
Example #23
def main():
    runner = perf.Runner()

    runner.bench_time_func('spawn native no close_fds',
                           bench_spawn_native,
                           False,
                           inner_loops=N)
    runner.bench_time_func('spawn gevent no close_fds',
                           bench_spawn_gevent,
                           False,
                           inner_loops=N)

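    # assumption: the two calls below rely on bench_spawn_native and
    # bench_spawn_gevent defaulting to close_fds=True when the flag
    # is omitted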
    runner.bench_time_func('spawn native close_fds',
                           bench_spawn_native,
                           inner_loops=N)
    runner.bench_time_func('spawn gevent close_fds',
                           bench_spawn_gevent,
                           inner_loops=N)
Example #24
def main():
    runner = perf.Runner()

    for name, obj in (('gevent', glocal()), ('gevent_sub', GLocalSub()),
                      ('native', nlocal()), ('native_sub', NativeSub())):
        _populate(obj)

        benchmarks.append(
            runner.bench_time_func('getattr_' + name,
                                   bench_getattr,
                                   obj,
                                   inner_loops=10))

        benchmarks.append(
            runner.bench_time_func('setattr_' + name,
                                   bench_setattr,
                                   obj,
                                   inner_loops=10))
Example #25
def main():
    runner = perf.Runner()

    runner.bench_time_func('imap_unordered_seq', bench_imap_un_seq)

    runner.bench_time_func('imap_unordered_par', bench_imap_un_par)

    runner.bench_time_func('imap_seq', bench_imap_seq)

    runner.bench_time_func('imap_par', bench_imap_par)

    runner.bench_time_func('map_seq', bench_map_seq)

    runner.bench_time_func('map_par', bench_map_par)

    runner.bench_time_func('apply', bench_apply)

    runner.bench_time_func('spawn', bench_spawn_wait)
Example #26
    def check_two_benchmarks(self, task=None):
        runner = perf.Runner()
        args = ['--worker', '--loops=1', '-w0', '-n3']
        if task is not None:
            args.append('--worker-task=%s' % task)
        runner.parse_args(args)

        def time_func(loops):
            return 1.0

        def time_func2(loops):
            return 2.0

        with tests.capture_stdout():
            bench1 = runner.bench_time_func('bench1', time_func)
            bench2 = runner.bench_time_func('bench2', time_func2)

        return (bench1, bench2)
Example #27
def main():
    runner = perf.Runner(add_cmdline_args=add_cmdline_args)
    runner.argparser.add_argument(
        "--cases",
        help="Comma-separated list of cases. Available cases: %s. "
             "By default, run all cases." % ', '.join(CASES))
    runner.metadata['description'] = "Benchmark json.dumps()"

    args = runner.parse_args()
    if args.cases:
        cases = []
        for case in args.cases.split(','):
            case = case.strip()
            if case:
                cases.append(case)
        if not cases:
            print("ERROR: empty list of cases")
            sys.exit(1)
Example #28
def main():
    runner = perf.Runner()

    runner.bench_func('multiple wait ready',
                      bench_wait_func_ready,
                      inner_loops=N)

    runner.bench_func('wait ready',
                      bench_wait_ready,
                      inner_loops=N)

    runner.bench_func('cancel wait',
                      bench_cancel_wait,
                      inner_loops=N)

    runner.bench_func('switch',
                      bench_switch,
                      inner_loops=N)
Example #29
def main():
    global timeit

    runner = perf.Runner(add_cmdline_args=add_cmdline_args)
    cmd = runner.argparser
    choices = sorted(BENCHMARKS)
    cmd.add_argument('benchmark', nargs='?', choices=choices)

    def timeit(stmt, setup):
        name = "%s; %s" % (setup, stmt)
        runner.timeit(name, stmt, setup)

    args = runner.parse_args()
    name = args.benchmark
    if not name:
        for name in BENCHMARKS:
            bench = globals()[name]
            bench()
    else:
        bench = globals()[name]
        bench()
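
For reference, `runner.timeit()` above mirrors the stdlib `timeit` interface;
a minimal standalone sketch with hypothetical stmt/setup strings:

import perf

runner = perf.Runner()
# setup runs before the timed statement, as in the timeit module
runner.timeit('sort 1000 ints',
              stmt='sorted(data)',
              setup='data = list(range(1000))')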
Example #30
def main():
    runner = perf.Runner()

    runner.bench_func('bench_unbounded_queue_noblock',
                      bench_unbounded_queue_noblock,
                      inner_loops=N)

    runner.bench_func('bench_bounded_queue_noblock',
                      bench_bounded_queue_noblock,
                      inner_loops=N)

    runner.bench_func('bench_bounded_queue_block',
                      bench_bounded_queue_block,
                      inner_loops=N)

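    # extra positional arguments after the callable (queue.Channel, the
    # hub-switch flag, ...) are forwarded to it by bench_func()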
    runner.bench_func('bench_channel',
                      bench_bounded_queue_block,
                      queue.Channel,
                      inner_loops=N)

    runner.bench_func('bench_bounded_queue_block_hub',
                      bench_bounded_queue_block,
                      queue.Queue, True,
                      inner_loops=N)

    runner.bench_func('bench_channel_hub',
                      bench_bounded_queue_block,
                      queue.Channel, True,
                      inner_loops=N)

    runner.bench_func('bench_unbounded_priority_queue_noblock',
                      bench_unbounded_queue_noblock,
                      queue.PriorityQueue,
                      inner_loops=N)

    runner.bench_func('bench_bounded_priority_queue_noblock',
                      bench_bounded_queue_noblock,
                      queue.PriorityQueue,
                      inner_loops=N)