* reading them from disk
    * creating the CParser object
    """
    files = list(_iter_files())

    elapsed = 0
    times = []
    for _ in range(loops):
        times.append(pyperf.perf_counter())
        # This is a macro benchmark for a Python implementation
        # so "elapsed" covers more than just how long parser.parse() takes.
        t0 = pyperf.perf_counter()
        parse_files(files)
        t1 = pyperf.perf_counter()

        elapsed += t1 - t0
    times.append(pyperf.perf_counter())
    return elapsed, times


#############################
# the script

if __name__ == "__main__":
    from legacyutils import maybe_handle_legacy
    maybe_handle_legacy(_bench_pycparser)

    runner = pyperf.Runner()
    runner.metadata['description'] = "Test the performance of pycparser"
    runner.bench_time_func("pycparser", bench_pycparser)
    elapsed = 0
    times = []
    for _ in range(loops):
        # This is a macro benchmark for a Python implementation
        # so "elapsed" covers more than just how long the Addressbook ops take.
        t0 = pyperf.perf_counter()
        for _ in range(100):
            # First, create the addressbook.
            ab = make_addressbook()
            # Then, round-trip through serialization.
            encoded = serialize(ab, proto_factory)
            ab2 = ttypes.AddressBook()
            deserialize(ab2, encoded, proto_factory)
        t1 = pyperf.perf_counter()

        elapsed += t1 - t0
        times.append(t0)
    times.append(pyperf.perf_counter())
    return elapsed, times


#############################
# the script

if __name__ == "__main__":
    from legacyutils import maybe_handle_legacy
    maybe_handle_legacy(_bench_thrift)

    runner = pyperf.Runner()
    runner.metadata['description'] = "Test the performance of thrift"
    runner.bench_time_func("thrift", bench_thrift)
        elapsed += t1 - t0
        times.append(t0)
        if legacy and (i % 100 == 0):
            print(i, t0 - start)
    times.append(pyperf.perf_counter())
    if legacy:
        total = times[-1] - start
        print("%.2fs (%.2f ms / %.3freq/s)" % (total, total / loops * 1e3, loops / total))
    return elapsed, times


#############################
# the script

if __name__ == "__main__":
    from legacyutils import maybe_handle_legacy
    if "--legacy" in sys.argv:
        with netutils.serving(ARGV, DATADIR, "127.0.0.1:8000"):
            maybe_handle_legacy(_bench_flask_requests, legacyarg='legacy')

    if "--worker" not in sys.argv:
        context = netutils.serving(ARGV, DATADIR, "127.0.0.1:8000")
    else:
        context = nullcontext()

    with context:
        runner = pyperf.Runner()
        runner.metadata['description'] = "Test the performance of flask"
        runner.bench_time_func("flaskblogging", bench_flask_requests)
        times.append(t0)
        if legacy and (i % 100 == 0):
            print(i, t0 - start)
    times.append(pyperf.perf_counter())
    if legacy:
        total = times[-1] - start
        print("%.2fs (%.3freq/s)" % (total, loops / total))
    return elapsed, times


#############################
# the script

if __name__ == "__main__":
    from legacyutils import maybe_handle_legacy
    maybe_handle_legacy(_bench_kinto, legacyarg='legacy')

    if NGINX is None:
        raise Exception("nginx is not installed")

    with ExitStack() as stack:
        if "--worker" not in sys.argv:
            cmd = [PYTHON, SETUP_PY, "develop"]
            proc = subprocess.run(
                cmd,
                cwd=DATADIR,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.STDOUT,
            )

            if proc.returncode != 0:
                # Assumed completion; the original fragment truncates here.
                sys.exit(proc.returncode)
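Example #5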
    elapsed = 0
    times = []
    with open(os.devnull, "w") as devnull:
        for i in range(loops):
            if legacy:
                print(i)
            # This is a macro benchmark for a Python implementation
            # so "elapsed" covers more than just how long main() takes.
            t0 = pyperf.perf_counter()
            try:
                main(None, devnull, devnull, TARGETS, clean_exit=True)
            except SystemExit:
                pass
            t1 = pyperf.perf_counter()

            elapsed += t1 - t0
            times.append(t0)
        times.append(pyperf.perf_counter())
    return elapsed, times


#############################
# the script

if __name__ == "__main__":
    from legacyutils import maybe_handle_legacy
    maybe_handle_legacy(_bench_mypy, legacyarg='legacy')

    runner = pyperf.Runner()
    runner.metadata['description'] = "Test the performance of mypy types"
    runner.bench_time_func("mypy", bench_mypy)
            sitedir, elapsed = setup(datadir)
            if args.legacy:
                print("%.2fs to initialize db" % (elapsed,))
                print(f"site created in {sitedir}")
            if not args.serve:
                print(f"now run {sys.executable} {sys.argv[0]} --serve {datadir}")
        else:
            # This is what a previous call to setup() would have returned.
            sitedir = os.path.join(datadir, SITE_NAME)

        # Then run the benchmark.
        if args.serve:
            if "--worker" not in sys.argv:
                context = netutils.serving(ARGV_SERVE, sitedir, "127.0.0.1:8000")
            else:
                context = nullcontext()

            with context:
                if args.legacy:
                    from legacyutils import maybe_handle_legacy
                    sys.argv[1:] = ["--legacy"] + legacy_args
                    maybe_handle_legacy(_bench_djangocms_requests, sitedir, legacyarg='legacy')
                    sys.exit(0)

                runner.datadir = datadir

                def time_func(loops, *args):
                    return bench_djangocms_requests(*args, loops=loops)
                runner.bench_time_func("djangocms", time_func, sitedir,
                                       inner_loops=INNER_LOOPS)
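# Note the inner_loops=INNER_LOOPS argument: pyperf divides each timed sample
# by inner_loops, so it must match the number of requests that
# bench_djangocms_requests issues per call of time_func.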
Example #7
    pylint seems to speed up considerably as it progresses, and this
    benchmark includes that.
    """
    elapsed = 0
    times = []
    for i in range(loops):
        print(i)
        # This is a macro benchmark for a Python implementation
        # so "elapsed" covers more than just how long Run() takes.
        t0 = pyperf.perf_counter()
        reporter = NullReporter()
        Run(TARGETS, exit=False, reporter=reporter)
        t1 = pyperf.perf_counter()

        elapsed += t1 - t0
        times.append(t0)
    times.append(pyperf.perf_counter())
    return elapsed, times


#############################
# the script

if __name__ == "__main__":
    from legacyutils import maybe_handle_legacy
    maybe_handle_legacy(_bench_pylint)

    runner = pyperf.Runner()
    runner.metadata['description'] = "Test the performance of pylint"
    runner.bench_time_func("pylint", bench_pylint)
Example #8
        t1 = pyperf.perf_counter()

        elapsed += t1 - t0
        times.append(t0)
        if legacy and (i % 100 == 0):
            print(i, t0 - start)
    times.append(pyperf.perf_counter())
    if legacy:
        total = times[-1] - start
        print("%.2fs (%.3freq/s)" % (total, loops / total))
    print(loops, elapsed / loops)
    return elapsed, times


#############################
# the script

if __name__ == "__main__":
    from legacyutils import maybe_handle_legacy
    maybe_handle_legacy(_bench_aiohttp_requests, legacyarg='legacy')

    if "--worker" not in sys.argv:
        context = netutils.serving(ARGV, DATADIR, "127.0.0.1:8080")
    else:
        context = nullcontext()

    with context:
        runner = pyperf.Runner()
        runner.metadata['description'] = "Test the performance of aiohttp"
        runner.bench_time_func("aiohttp", bench_aiohttp_requests)
Example #9
    input_batch = input_tensor.unsqueeze(0)  # create a mini-batch as expected by the model

    with torch.no_grad():
        elapsed = 0
        times = []
        for i in range(loops):
            if legacy and (i % 10 == 0):
                print(i)
            # This is a macro benchmark for a Python implementation
            # so "elapsed" covers more than just how long model() takes.
            t0 = pyperf.perf_counter()
            output = model(input_batch)
            t1 = pyperf.perf_counter()

            elapsed += t1 - t0
            times.append(t0)
        times.append(pyperf.perf_counter())
        return elapsed, times


#############################
# the script

if __name__ == "__main__":
    from legacyutils import maybe_handle_legacy
    maybe_handle_legacy(_bench_pytorch, legacyarg='legacy')

    runner = pyperf.Runner()
    runner.metadata['description'] = "Test the performance of pytorch"
    runner.bench_time_func("pytorch", bench_pytorch)
Example #10
        requests_get("http://localhost:8000/blog/").text
        t1 = pyperf.perf_counter()

        elapsed += t1 - t0
        times.append(t0)
        if legacy and (i % 100 == 0):
            print(i, t0 - start)
    times.append(pyperf.perf_counter())
    if legacy:
        total = times[-1] - start
        print("%.2fs (%.3freq/s)" % (total, loops / total))
    return elapsed, times


#############################
# the script

if __name__ == "__main__":
    from legacyutils import maybe_handle_legacy
    maybe_handle_legacy(_bench_gunicorn, legacyarg='legacy')

    if "--worker" not in sys.argv:
        context = netutils.serving(ARGV, DATADIR, ADDR)
    else:
        context = nullcontext()

    with context:
        runner = pyperf.Runner()
        runner.metadata['description'] = "Test the performance of gunicorn"
        runner.bench_time_func("gunicorn", bench_gunicorn)
Example #11
    lines = s.splitlines()

    elapsed = 0
    times = []
    for _ in range(loops):
        # This is a macro benchmark for a Python implementation
        # so "elapsed" covers more than just how long json.loads() takes.
        t0 = pyperf.perf_counter()
        for text in lines:
            if not text:
                continue
            json.loads(text)
        t1 = pyperf.perf_counter()

        elapsed += t1 - t0
        times.append(t0)
    times.append(pyperf.perf_counter())
    return elapsed, times


#############################
# the script

if __name__ == "__main__":
    from legacyutils import maybe_handle_legacy
    maybe_handle_legacy(_bench_json_loads)

    runner = pyperf.Runner()
    runner.metadata['description'] = "Test the performance of json"
    runner.bench_time_func("json", bench_json_loads)