Example #1
import sys
import threading

import pyperf


def main(params):
    # fall back to 1 if the parameter is missing or zero
    nloops = int(params.get('loops', 1)) or 1
    workers = int(params.get('workers', 1)) or 1

    # Python 3.8 changed the default Windows event loop to ProactorEventLoop,
    # which doesn't implement everything required by tornado and breaks this
    # benchmark. Restore the old WindowsSelectorEventLoop default for now.
    # https://bugs.python.org/issue37373
    # https://github.com/python/pyperformance/issues/61
    # https://github.com/tornadoweb/tornado/pull/2686
    if sys.platform == 'win32' and sys.version_info[:2] == (3, 8):
        import asyncio
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

    kw = {'loops': nloops}
    if pyperf.python_has_jit():
        # PyPy needs to compute more warmup values to warm up its JIT
        kw['warmups'] = 30
    runner = pyperf.Runner(**kw)

    threads = []
    for i in range(workers):
        threads.append(
            threading.Thread(target=functionWorker, args=(runner, i)))

    # Start every worker before joining any of them, so they run concurrently;
    # joining inside the start loop would serialize the threads.
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()

    out = 'Executed %s threads' % workers
    result = {'output': out}
    return result
Example #2
    def test_bench(self):
        loops = 4
        args = ('-p', '2',
                '-w', '1',
                '-n', '3',
                '-l', str(loops),
                '--min-time', '0.001',
                '-s', 'import time',
                SLEEP)
        args = PERF_TIMEIT + args
        bench, stdout = self.run_timeit_bench(args)

        # FIXME: check skipped on JIT Pythons, since calibration continues
        # during the warmups
        if not pyperf.python_has_jit():
            for run in bench.get_runs():
                self.assertEqual(run.get_total_loops(), loops)

        runs = bench.get_runs()
        self.assertEqual(len(runs), 2)
        for run in runs:
            self.assertIsInstance(run, pyperf.Run)
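            # with -w 1 and -n 3, each run has 1 warmup + 3 values = 4 raw values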
            raw_values = run._get_raw_values(warmups=True)
            self.assertEqual(len(raw_values), 4)
            for raw_value in raw_values:
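                # convert the per-loop raw timing to milliseconds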
                ms = (raw_value / loops) * 1e3
                self.assertTrue(MIN_VALUE <= ms <= MAX_VALUE, ms)
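The test above relies on module-level constants from its test module. Plausible
definitions, for illustration only (hypothetical values, not the actual test
code), could look like:

import sys

PERF_TIMEIT = (sys.executable, '-m', 'pyperf', 'timeit')
SLEEP = 'time.sleep(1e-3)'   # statement timed by the benchmark
MIN_VALUE = 0.9              # ms; lower bound for a ~1 ms sleep
MAX_VALUE = 50.0             # ms; generous bound for scheduler jitter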
Example #3
def main(params):
    kw = {}
    if pyperf.python_has_jit():
        # PyPy needs to compute more warmup values to warm up its JIT
        kw['warmups'] = 50
    runner = pyperf.Runner(**kw, loops=1)
    runner.metadata['description'] = "Test the performance of the Go benchmark"
    runner.bench_func('go', versus_cpu)

    return {'Result': 'Success'}
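The snippet above depends on an external versus_cpu callable. A minimal
self-contained sketch of the same idiom, with workload as a hypothetical
stand-in:

import pyperf

def workload():
    # hypothetical stand-in for versus_cpu: any callable to benchmark
    sum(range(10_000))

kw = {}
if pyperf.python_has_jit():
    # PyPy needs more warmups to warm up its JIT
    kw['warmups'] = 50
runner = pyperf.Runner(**kw)
runner.bench_func('workload', workload)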
Example #4
def functionWorker(tname, allocate_pkey):
    if allocate_pkey:
        pkey_thread_mapper(tname)
    kw = {}
    if pyperf.python_has_jit():
        # PyPy needs to compute more warmup values to warm up its JIT
        kw['warmups'] = 50
    runner = pyperf.Runner(**kw, loops=1)
    runner.metadata['description'] = "Test the performance of the Go benchmark"
    runner.bench_func('go', versus_cpu)
    del runner
    pymem_reset()
Example #5
def functionWorker(tname, allocate_pkey):
    if allocate_pkey:
        pkey_thread_mapper(tname)
    kw = {'add_cmdline_args': add_cmdline_args}
    if pyperf.python_has_jit():
        # PyPy needs to compute more warmup values to warm up its JIT
        kw['warmups'] = 15
    runner = pyperf.Runner(**kw, loops=1)
    levels = sorted(LEVELS)
    # runner.argparser.add_argument("--level", type=int,
    #                               choices=levels,
    #                               default=DEFAULT_LEVEL,
    #                               help="Hexiom board level (default: %s)"
    #                                    % DEFAULT_LEVEL)

    # args = runner.parse_args()
    # runner.metadata['description'] = "Solver of Hexiom board game"
    # runner.metadata['hexiom_level'] = args.level

    runner.bench_time_func('hexiom', main, DEFAULT_LEVEL)
    del runner
    pymem_reset()
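bench_time_func() passes the loop count as the first positional argument and
expects the callable to return the total elapsed time. A minimal sketch of a
compatible time function (my_time_func and its body are hypothetical):

import pyperf

def my_time_func(loops, level):
    # run the workload `loops` times and return the total elapsed time
    t0 = pyperf.perf_counter()
    for _ in range(loops):
        pass  # the per-level workload would go here
    return pyperf.perf_counter() - t0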
Example #6
    def __init__(self,
                 values=None,
                 warmups=None,
                 processes=None,
                 loops=0,
                 min_time=0.1,
                 metadata=None,
                 show_name=True,
                 program_args=None,
                 add_cmdline_args=None,
                 _argparser=None):

        # Watchdog: ensure that only one instance of Runner (or a Runner
        # subclass) is created per process to prevent bad surprises
        cls = self.__class__
        key = id(cls)
        if key in cls._created:
            raise RuntimeError("only one %s instance must be created "
                               "per process: use the same instance to run "
                               "all benchmarks" % cls.__name__)
        cls._created.add(key)

        # Use lazy import to limit imports on 'import pyperf'
        import argparse

        has_jit = pyperf.python_has_jit()
        if not values:
            if has_jit:
                # Since PyPy (with its JIT) uses fewer processes,
                # run more values per process
                values = 10
            else:
                values = 3
        if not processes:
            if has_jit:
                # Use fewer processes than for non-JIT Pythons, because the
                # JIT requires more warmups, so each worker is slower
                processes = 6
            else:
                processes = 20

        if metadata is not None:
            self.metadata = metadata
        else:
            self.metadata = {}

        # Worker task identifier: count how many times _worker() was called,
        # see the --worker-task command line option
        self._worker_task = 0

        # Set used to check that benchmark names are unique
        self._bench_names = set()

        # result of argparser.parse_args()
        self.args = None

        # callback used to prepare command line arguments to spawn a worker
        # child process. The callback is called with prepare(runner.args, cmd).
        # args must be modified in-place.
        self._add_cmdline_args = add_cmdline_args

        # Command list arguments to call the program: (sys.argv[0],) by
        # default.
        #
        # For example, "python3 -m pyperf timeit" sets program_args to
        # ('-m', 'pyperf', 'timeit').
        if program_args:
            self._program_args = program_args
        else:
            self._program_args = (sys.argv[0], )
        self._show_name = show_name

        if _argparser is not None:
            parser = _argparser
        else:
            parser = argparse.ArgumentParser()
        parser.description = 'Benchmark'
        parser.add_argument('--rigorous',
                            action="store_true",
                            help='Spend longer running tests '
                            'to get more accurate results')
        parser.add_argument('--fast',
                            action="store_true",
                            help='Get rough answers quickly')
        parser.add_argument("--debug-single-value",
                            action="store_true",
                            help="Debug mode, only compute a single value")
        parser.add_argument('-p',
                            '--processes',
                            type=strictly_positive,
                            default=processes,
                            help='number of processes used to run benchmarks '
                            '(default: %s)' % processes)
        parser.add_argument('-n',
                            '--values',
                            dest="values",
                            type=strictly_positive,
                            default=values,
                            help='number of values per process (default: %s)' %
                            values)
        parser.add_argument('-w',
                            '--warmups',
                            type=positive_or_nul,
                            help='number of skipped values per run used '
                            'to warmup the benchmark')
        parser.add_argument('-l',
                            '--loops',
                            type=positive_or_nul,
                            default=loops,
                            help='number of loops per value, 0 means '
                            'automatic calibration (default: %s)' % loops)
        parser.add_argument('-v',
                            '--verbose',
                            action="store_true",
                            help='enable verbose mode')
        parser.add_argument('-q',
                            '--quiet',
                            action="store_true",
                            help='enable quiet mode')
        parser.add_argument('--pipe',
                            type=int,
                            metavar="FD",
                            help='Write benchmarks encoded as JSON '
                            'into the pipe FD')
        parser.add_argument('-o',
                            '--output',
                            metavar='FILENAME',
                            help='write results encoded to JSON into FILENAME')
        parser.add_argument(
            '--append',
            metavar='FILENAME',
            help='append results encoded to JSON into FILENAME')
        parser.add_argument('--min-time',
                            type=float,
                            default=min_time,
                            help='Minimum duration in seconds of a single '
                            'value, used to calibrate the number of '
                            'loops (default: %s)' % format_timedelta(min_time))
        parser.add_argument('--worker',
                            action='store_true',
                            help='Worker process, run the benchmark.')
        parser.add_argument('--worker-task',
                            type=positive_or_nul,
                            metavar='TASK_ID',
                            help='Identifier of the worker task: '
                            'only execute the benchmark function TASK_ID')
        parser.add_argument('--calibrate-loops',
                            action="store_true",
                            help="calibrate the number of loops")
        parser.add_argument('--recalibrate-loops',
                            action="store_true",
                            help="recalibrate the the number of loops")
        parser.add_argument('--calibrate-warmups',
                            action="store_true",
                            help="calibrate the number of warmups")
        parser.add_argument('--recalibrate-warmups',
                            action="store_true",
                            help="recalibrate the number of warmups")
        parser.add_argument('-d',
                            '--dump',
                            action="store_true",
                            help='display benchmark run results')
        parser.add_argument('--metadata',
                            '-m',
                            action="store_true",
                            help='show metadata')
        parser.add_argument('--hist',
                            '-g',
                            action="store_true",
                            help='display a histogram of values')
        parser.add_argument('--stats',
                            '-t',
                            action="store_true",
                            help='display statistics (min, max, ...)')
        parser.add_argument("--affinity",
                            metavar="CPU_LIST",
                            default=None,
                            help='Specify CPU affinity for worker processes. '
                            'This way, benchmarks can be forced to run '
                            'on a given set of CPUs to minimize run to '
                            'run variation. By default, worker processes '
                            'are pinned to isolate CPUs if isolated CPUs '
                            'are found.')
        parser.add_argument("--inherit-environ",
                            metavar='VARS',
                            type=comma_separated,
                            help='Comma-separated list of environment '
                            'variables inherited by worker child '
                            'processes.')
        parser.add_argument("--no-locale",
                            dest="locale",
                            action="store_false",
                            default=True,
                            help="Don't copy locale environment variables "
                            "like LANG or LC_CTYPE.")
        parser.add_argument("--python",
                            default=sys.executable,
                            help='Python executable '
                            '(default: use running Python, '
                            'sys.executable)')
        parser.add_argument(
            "--compare-to",
            metavar="REF_PYTHON",
            help='Run benchmark on the Python executable REF_PYTHON, '
            'run benchmark on Python executable PYTHON, '
            'and then compare REF_PYTHON result to PYTHON result')
        parser.add_argument("--python-names",
                            metavar="REF_NAME:CHANGED_NAMED",
                            type=parse_python_names,
                            help='option used with --compare-to to name '
                            'PYTHON as CHANGED_NAME '
                            'and REF_PYTHON as REF_NAME in results')

        memory = parser.add_mutually_exclusive_group()
        memory.add_argument('--tracemalloc',
                            action="store_true",
                            help='Trace memory allocations using tracemalloc')
        memory.add_argument('--track-memory',
                            action="store_true",
                            help='Track memory usage using a thread')

        self.argparser = parser
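Because of the watchdog shown above, constructing a second Runner in the same
process fails fast. A minimal sketch of that behavior:

import pyperf

runner = pyperf.Runner()
try:
    pyperf.Runner()  # second instance in the same process
except RuntimeError as exc:
    # "only one Runner instance must be created per process: ..."
    print(exc)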
Example #7
    def _process_args_impl(self):
        args = self.args

        if args.pipe:
            args.quiet = True
            args.verbose = False
        elif args.quiet:
            args.verbose = False

        has_jit = pyperf.python_has_jit()
        if args.warmups is None and not args.worker and not has_jit:
            args.warmups = 1

        nprocess = self.argparser.get_default('processes')
        nvalues = self.argparser.get_default('values')
        if args.rigorous:
            args.processes = nprocess * 2
            # args.values = nvalues * 5 // 3
        elif args.fast:
            # use at least 3 processes to benchmark 3 different (randomized)
            # hash functions
            args.processes = max(nprocess // 2, 3)
            args.values = max(nvalues * 2 // 3, 2)
        elif args.debug_single_value:
            args.processes = 1
            args.warmups = 0
            args.values = 1
            args.loops = 1
            args.min_time = 1e-9

        # calibration
        if args.calibrate_loops:
            self._only_in_worker("--calibrate-loops")
            if args.loops:
                raise CLIError("--loops=N is incompatible with "
                               "--calibrate-loops")
        elif args.recalibrate_loops:
            self._only_in_worker("--recalibrate-loops")
            if args.loops < 1:
                raise CLIError("--recalibrate-loops requires --loops=N")
        elif args.calibrate_warmups:
            self._only_in_worker("--calibrate-warmups")
            if args.loops < 1:
                raise CLIError("--calibrate-warmups requires --loops=N")
        elif args.recalibrate_warmups:
            self._only_in_worker("--recalibrate-warmups")
            if args.loops < 1 or args.warmups is None:
                raise CLIError("--recalibrate-warmups requires "
                               "--loops=N and --warmups=N")
        else:
            if args.worker and args.loops < 1:
                raise CLIError("--worker requires --loops=N "
                               "or --calibrate-loops")
            if args.worker and args.warmups is None:
                raise CLIError("--worker requires --warmups=N "
                               "or --calibrate-warmups")

            if args.values < 1:
                raise CLIError("--values must be >= 1")

        filename = args.output
        if filename and os.path.exists(filename):
            raise CLIError("The JSON file %r already exists" % filename)

        if args.worker_task:
            self._only_in_worker("--worker-task")

        if args.tracemalloc:
            try:
                import tracemalloc  # noqa
            except ImportError as exc:
                raise CLIError("fail to import tracemalloc: %s" % exc)

        if args.track_memory:
            if MS_WINDOWS:
                from pyperf._win_memory import check_tracking_memory
            else:
                from pyperf._memory import check_tracking_memory
            err_msg = check_tracking_memory()
            if err_msg:
                raise CLIError("unable to track the memory usage "
                               "(--track-memory): %s" % err_msg)

        args.python = abs_executable(args.python)
        if args.compare_to:
            args.compare_to = abs_executable(args.compare_to)

        if args.compare_to:
            for option in ('output', 'append'):
                if getattr(args, option):
                    raise CLIError("--%s option is incompatible "
                                   "with --compare-to option" % option)
    output = '\n'.join(line.rstrip() for line in output.splitlines())
    if output != expected:
        raise AssertionError("got a wrong answer:\n%s\nexpected: %s" %
                             (output, expected))

    return dt


def add_cmdline_args(cmd, args):
    cmd.extend(("--level", str(args.level)))


if __name__ == "__main__":
    kw = {'add_cmdline_args': add_cmdline_args}
    if pyperf.python_has_jit():
    # PyPy needs to compute more warmup values to warm up its JIT
        kw['warmups'] = 15
    runner = pyperf.Runner(**kw)
    levels = sorted(LEVELS)
    runner.argparser.add_argument("--level",
                                  type=int,
                                  choices=levels,
                                  default=DEFAULT_LEVEL,
                                  help="Hexiom board level (default: %s)" %
                                  DEFAULT_LEVEL)

    args = runner.parse_args()
    runner.metadata['description'] = "Solver of Hexiom board game"
    runner.metadata['hexiom_level'] = args.level
Example #9
    def test_python_has_jit(self):
        jit = pyperf.python_has_jit()
        self.assertIsInstance(jit, bool)
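python_has_jit() is the flag all of the examples above branch on. A quick
check (the expected output is an assumption: False on CPython, True on PyPy):

import pyperf

print(pyperf.python_has_jit())  # False on CPython, True on PyPy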