Example #1
    def _compute_values(self,
                        values,
                        nvalue,
                        is_warmup=False,
                        calibrate_loops=False,
                        start=0):
        unit = self.metadata.get('unit')
        args = self.args
        if nvalue < 1:
            raise ValueError("nvalue must be >= 1")
        if self.loops <= 0:
            raise ValueError("loops must be >= 1")

        if is_warmup:
            value_name = 'Warmup'
        else:
            value_name = 'Value'

        index = 1
        inner_loops = self.inner_loops
        if not inner_loops:
            inner_loops = 1
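        # use a while loop rather than range(): the calibration code below
        # can increase nvalue while iterating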
        while True:
            if index > nvalue:
                break

            raw_value = self.task_func(self, self.loops)
            raw_value = float(raw_value)
            value = raw_value / (self.loops * inner_loops)

            if not value and not calibrate_loops:
                raise ValueError("benchmark function returned zero")

            if is_warmup:
                values.append((self.loops, value))
            else:
                values.append(value)

            if args.verbose:
                text = format_value(unit, value)
                if is_warmup:
                    text = ('%s (loops: %s, raw: %s)'
                            % (text, format_number(self.loops),
                               format_value(unit, raw_value)))
                print("%s %s: %s" % (value_name, start + index, text))

            if calibrate_loops and raw_value < args.min_time:
                if self.loops * 2 > MAX_LOOPS:
                    print("ERROR: failed to calibrate the number of loops")
                    print("Raw timing %s with %s is still smaller than "
                          "the minimum time of %s" %
                          (format_value(unit, raw_value),
                           format_number(self.loops, 'loop'),
                           format_timedelta(args.min_time)))
                    sys.exit(1)
                self.loops *= 2
                # need more values for the calibration
                nvalue += 1

            index += 1
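
For intuition, here is a minimal standalone sketch of the same loop-calibration idea, independent of pyperf (the calibrate_loops() function and its names below are hypothetical, and MAX_LOOPS stands in for pyperf's internal cap): keep doubling the loop count until one raw timing reaches min_time, mirroring the self.loops *= 2 branch above.

import time

MAX_LOOPS = 2 ** 32   # illustrative cap, standing in for pyperf's constant

def calibrate_loops(func, min_time=0.1):
    # double the loop count until a single raw timing takes at least
    # min_time seconds, like the calibrate_loops branch above
    loops = 1
    while True:
        t0 = time.perf_counter()
        for _ in range(loops):
            func()
        raw_value = time.perf_counter() - t0
        if raw_value >= min_time:
            return loops
        if loops * 2 > MAX_LOOPS:
            raise RuntimeError("failed to calibrate the number of loops")
        loops *= 2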
Example #2
def cmd_slowest(args):
    data = load_benchmarks(args, name=False)
    nslowest = args.n

    use_title = (data.get_nsuite() > 1)
    for item in data.iter_suites():
        if use_title:
            display_title(item.filename, 1)

        benchs = []
        for bench in item.suite:
            duration = bench.get_total_duration()
            benchs.append((duration, bench))
        benchs.sort(key=lambda entry: entry[0], reverse=True)

        for index, (duration, bench) in enumerate(benchs[:nslowest], 1):
            print("#%s: %s (%s)" %
                  (index, bench.get_name(), format_timedelta(duration)))
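
The sort-then-slice pattern above is easy to check in isolation; a small self-contained demo with made-up names and durations (not real benchmark results):

benchs = [(0.5, 'json_loads'), (2.0, 'regex_dna'), (1.2, 'pickle')]
benchs.sort(key=lambda entry: entry[0], reverse=True)
for index, (duration, name) in enumerate(benchs[:2], 1):
    # prints the two slowest entries, slowest first
    print("#%s: %s (%.1f sec)" % (index, name, duration))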
Example #3
    def __init__(self,
                 values=None,
                 warmups=None,
                 processes=None,
                 loops=0,
                 min_time=0.1,
                 metadata=None,
                 show_name=True,
                 program_args=None,
                 add_cmdline_args=None,
                 _argparser=None):

        # Watchdog: ensure that only one instance of Runner (or a Runner
        # subclass) is created per process to prevent bad surprises
        cls = self.__class__
        key = id(cls)
        if key in cls._created:
            raise RuntimeError("only one %s instance must be created "
                               "per process: use the same instance to run "
                               "all benchmarks" % cls.__name__)
        cls._created.add(key)

        # Use lazy import to limit imports on 'import pyperf'
        import argparse

        has_jit = pyperf.python_has_jit()
        if not values:
            if has_jit:
                # Since the PyPy JIT uses fewer processes,
                # run more values per process
                values = 10
            else:
                values = 3
        if not processes:
            if has_jit:
                # Use fewer processes than for non-JIT Python: the JIT
                # requires more warmups, so each worker is slower
                processes = 6
            else:
                processes = 20

        if metadata is not None:
            self.metadata = metadata
        else:
            self.metadata = {}

        # Worker task identifier: count how many times _worker() was called,
        # see the --worker-task command line option
        self._worker_task = 0

        # Set used to check that benchmark names are unique
        self._bench_names = set()

        # result of argparser.parse_args()
        self.args = None

        # callback used to prepare command line arguments to spawn a worker
        # child process. The callback is called with prepare(runner.args, cmd).
        # args must be modified in-place.
        self._add_cmdline_args = add_cmdline_args

        # Command list arguments to call the program: (sys.argv[0],) by
        # default.
        #
        # For example, "python3 -m pyperf timeit" sets program_args to
        # ('-m', 'pyperf', 'timeit').
        if program_args:
            self._program_args = program_args
        else:
            self._program_args = (sys.argv[0], )
        self._show_name = show_name

        if _argparser is not None:
            parser = _argparser
        else:
            parser = argparse.ArgumentParser()
        parser.description = 'Benchmark'
        parser.add_argument('--rigorous',
                            action="store_true",
                            help='Spend longer running tests '
                            'to get more accurate results')
        parser.add_argument('--fast',
                            action="store_true",
                            help='Get rough answers quickly')
        parser.add_argument("--debug-single-value",
                            action="store_true",
                            help="Debug mode, only compute a single value")
        parser.add_argument('-p',
                            '--processes',
                            type=strictly_positive,
                            default=processes,
                            help='number of processes used to run benchmarks '
                            '(default: %s)' % processes)
        parser.add_argument('-n',
                            '--values',
                            dest="values",
                            type=strictly_positive,
                            default=values,
                            help='number of values per process (default: %s)' %
                            values)
        parser.add_argument('-w',
                            '--warmups',
                            type=positive_or_nul,
                            help='number of skipped values per run used '
                            'to warmup the benchmark')
        parser.add_argument('-l',
                            '--loops',
                            type=positive_or_nul,
                            default=loops,
                            help='number of loops per value, 0 means '
                            'automatic calibration (default: %s)' % loops)
        parser.add_argument('-v',
                            '--verbose',
                            action="store_true",
                            help='enable verbose mode')
        parser.add_argument('-q',
                            '--quiet',
                            action="store_true",
                            help='enable quiet mode')
        parser.add_argument('--pipe',
                            type=int,
                            metavar="FD",
                            help='Write benchmarks encoded as JSON '
                            'into the pipe FD')
        parser.add_argument('-o',
                            '--output',
                            metavar='FILENAME',
                            help='write results encoded as JSON into FILENAME')
        parser.add_argument(
            '--append',
            metavar='FILENAME',
            help='append results encoded as JSON into FILENAME')
        parser.add_argument('--min-time',
                            type=float,
                            default=min_time,
                            help='Minimum duration in seconds of a single '
                            'value, used to calibrate the number of '
                            'loops (default: %s)' % format_timedelta(min_time))
        parser.add_argument('--worker',
                            action='store_true',
                            help='Worker process, run the benchmark.')
        parser.add_argument('--worker-task',
                            type=positive_or_nul,
                            metavar='TASK_ID',
                            help='Identifier of the worker task: '
                            'only execute the benchmark function TASK_ID')
        parser.add_argument('--calibrate-loops',
                            action="store_true",
                            help="calibrate the number of loops")
        parser.add_argument('--recalibrate-loops',
                            action="store_true",
                            help="recalibrate the the number of loops")
        parser.add_argument('--calibrate-warmups',
                            action="store_true",
                            help="calibrate the number of warmups")
        parser.add_argument('--recalibrate-warmups',
                            action="store_true",
                            help="recalibrate the number of warmups")
        parser.add_argument('-d',
                            '--dump',
                            action="store_true",
                            help='display benchmark run results')
        parser.add_argument('--metadata',
                            '-m',
                            action="store_true",
                            help='show metadata')
        parser.add_argument('--hist',
                            '-g',
                            action="store_true",
                            help='display a histogram of values')
        parser.add_argument('--stats',
                            '-t',
                            action="store_true",
                            help='display statistics (min, max, ...)')
        parser.add_argument("--affinity",
                            metavar="CPU_LIST",
                            default=None,
                            help='Specify CPU affinity for worker processes. '
                            'This way, benchmarks can be forced to run '
                            'on a given set of CPUs to minimize '
                            'run-to-run variation. By default, worker '
                            'processes are pinned to isolated CPUs if '
                            'isolated CPUs are found.')
        parser.add_argument("--inherit-environ",
                            metavar='VARS',
                            type=comma_separated,
                            help='Comma-separated list of environment '
                            'variables inherited by worker child '
                            'processes.')
        parser.add_argument("--no-locale",
                            dest="locale",
                            action="store_false",
                            default=True,
                            help="Don't copy locale environment variables "
                            "like LANG or LC_CTYPE.")
        parser.add_argument("--python",
                            default=sys.executable,
                            help='Python executable '
                            '(default: use running Python, '
                            'sys.executable)')
        parser.add_argument(
            "--compare-to",
            metavar="REF_PYTHON",
            help='Run benchmark on the Python executable REF_PYTHON, '
            'run benchmark on Python executable PYTHON, '
            'and then compare REF_PYTHON result to PYTHON result')
        parser.add_argument("--python-names",
                            metavar="REF_NAME:CHANGED_NAMED",
                            type=parse_python_names,
                            help='option used with --compare-to to name '
                            'PYTHON as CHANGED_NAME '
                            'and REF_PYTHON as REF_NAME in results')

        memory = parser.add_mutually_exclusive_group()
        memory.add_argument('--tracemalloc',
                            action="store_true",
                            help='Trace memory allocations using tracemalloc')
        memory.add_argument('--track-memory',
                            action="store_true",
                            help='Track memory usage using a thread')

        self.argparser = parser
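
In normal use this constructor is reached through pyperf's public API; a minimal benchmark script looks like this (standard pyperf usage; the script name and benchmark name are illustrative, and the code must live in its own file so the main process can respawn it as worker processes):

# bench_sort.py -- run with: python3 bench_sort.py -o result.json
import pyperf

runner = pyperf.Runner()
# bench_func() times sorted(data) with calibrated loops, warmups and
# multiple worker processes, all driven by the options defined above
runner.bench_func('sort 1000 ints', sorted, list(range(1000)))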
Example #4
def collect_python_metadata(metadata):
    # Implementation
    impl = pyperf.python_implementation()
    metadata['python_implementation'] = impl

    # Version
    version = platform.python_version()

    match = re.search(r'\[(PyPy [^ ]+)', sys.version)
    if match:
        version = '%s (Python %s)' % (match.group(1), version)

    bits = platform.architecture()[0]
    if bits:
        if bits == '64bit':
            bits = '64-bit'
        elif bits == '32bit':
            bits = '32-bit'
        version = '%s (%s)' % (version, bits)

    # '74667320778e' in 'Python 2.7.12+ (2.7:74667320778e,'
    match = re.search(r'^[^(]+\([^:]+:([a-f0-9]{6,}\+?),', sys.version)
    if match:
        revision = match.group(1)
    else:
        # 'bbd45126bc691f669c4ebdfbd74456cd274c6b92'
        # in 'Python 2.7.10 (bbd45126bc691f669c4ebdfbd74456cd274c6b92,'
        match = re.search(r'^[^(]+\(([a-f0-9]{6,}\+?),', sys.version)
        if match:
            revision = match.group(1)
        else:
            revision = None
    if revision:
        version = '%s revision %s' % (version, revision)
    metadata['python_version'] = version

    if sys.executable:
        metadata['python_executable'] = sys.executable

    # timer
    info = time.get_clock_info('perf_counter')
    metadata['timer'] = (
        '%s, resolution: %s' %
        (info.implementation, format_timedelta(info.resolution)))

    # PYTHONHASHSEED
    if os.environ.get('PYTHONHASHSEED'):
        hash_seed = os.environ['PYTHONHASHSEED']
        try:
            if hash_seed != "random":
                hash_seed = int(hash_seed)
        except ValueError:
            pass
        else:
            metadata['python_hash_seed'] = hash_seed

    # compiler
    python_compiler = normalize_text(platform.python_compiler())
    if python_compiler:
        metadata['python_compiler'] = python_compiler

    # CFLAGS
    try:
        import sysconfig
    except ImportError:
        pass
    else:
        cflags = sysconfig.get_config_var('CFLAGS')
        if cflags:
            cflags = normalize_text(cflags)
            metadata['python_cflags'] = cflags

    # GC disabled?
    try:
        import gc
    except ImportError:
        pass
    else:
        if not gc.isenabled():
            metadata['python_gc'] = 'disabled'
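
A quick way to inspect what this collector produces (assuming the pyperf internals it relies on, such as normalize_text() and format_timedelta(), are importable alongside it; the sample output values are only an example):

metadata = {}
collect_python_metadata(metadata)
for key in sorted(metadata):
    print("%s: %s" % (key, metadata[key]))
# e.g.: python_implementation: cpython
#       python_version: 3.11.4 (64-bit)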