Example No. 1
def bench(argv):
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of simple AI solvers."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args(argv)

    util.run_benchmark(options, options.num_runs, test_n_queens)
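Every excerpt below calls the same two helpers from the benchmark suite's util module, which never appear in these listings. A minimal sketch of what they might look like, assuming a shared --num_runs option and benchmark functions that return a list of per-run timings (the real util.py also handles details such as profiling and the geometric-mean reporting hinted at by the run() wrappers that pass a geo_mean flag; those are omitted here):

import optparse


def add_standard_options_to(parser):
    # Assumed convention: every benchmark shares a -n/--num_runs option.
    parser.add_option("-n", "--num_runs", type="int", default=5,
                      dest="num_runs", help="Number of timed runs.")


def run_benchmark(options, iterations, bench_func, *args):
    # Assumed driver: the benchmark returns one time per run; print them.
    times = bench_func(iterations, *args)
    for t in times:
        print("%.6f" % t)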
Example No. 2
def bench(argv):
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the Richards benchmark")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args(argv)

    util.run_benchmark(options, options.num_runs, test_richards)
Example No. 3
def run(num_runs=1, geo_mean=True):
    # Get all our IO over with early.
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    spec_filename = os.path.join(data_dir, "html5lib_spec.html")
    with open(spec_filename) as spec_fh:
        spec_data = io.StringIO(spec_fh.read())

    util.run_benchmark(geo_mean, num_runs, test_html5lib, spec_data)
Example No. 4
def run(num_runs=1, geo_mean=True):
    # Get all our IO over with early.
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    spec_filename = os.path.join(data_dir, "html5lib_spec.html")
    with open(spec_filename) as spec_fh:
        spec_data = io.StringIO(spec_fh.read())

    util.run_benchmark(geo_mean, num_runs, test_html5lib, spec_data)
Example No. 5
def bench(argv):
    parser = optparse.OptionParser(usage="%prog [options]",
                                   description=("Run the n-body benchmark."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args(argv)

    offset_momentum(BODIES['sun'])  # Set up global state
    util.run_benchmark(options, options.num_runs, test_nbody)
Example No. 6
def bench(argv):
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of regexps using Fredik Lundh's "
                     "benchmarks."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args(argv)

    util.run_benchmark(options, options.num_runs, test_regex_effbot)
Example No. 7
def bench(argv):
    parser = optparse.OptionParser(
        usage="%prog [options] [test]",
        description=("Test the performance of simple Python-to-Python function"
                     " calls."))
    util.add_standard_options_to(parser)
    options, _ = parser.parse_args(argv)

    # Priming run.
    test_calls(1)

    util.run_benchmark(options, options.num_runs, test_calls)
Example No. 8
def bench(argv):
    parser = optparse.OptionParser(
        usage="%prog [options] [test]",
        description=("Test the performance of sequence unpacking."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args(argv)

    tests = {"tuple": test_tuple_unpacking, "list": test_list_unpacking}

    if len(args) > 1:
        parser.error("Can only specify one test")
    elif len(args) == 1:
        func = tests.get(args[0])
        if func is None:
            parser.error("Invalid test name")
        util.run_benchmark(options, options.num_runs, func)
    else:
        util.run_benchmark(options, options.num_runs, test_all)
Example No. 9
def entry_point(argv):
    import optparse
    import util

    def parse_depths(option, opt_str, value, parser):
        parser.values.depths = [v for v in value.split(',') if v]
    
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the garbage collector benchmark")
    util.add_standard_options_to(parser)
    parser.add_option('--threads', default=0, action="store",
                      help="provide number of threads (default 1)")
    parser.add_option('--depths', default=DEFAULT_DEPTHS, type="string",
                      action="callback", callback=parse_depths,
                      help='tree depths')
    parser.add_option('--debug', default=False, action='store_true',
                      help="enable debugging")
    options, args = parser.parse_args(argv)
    util.run_benchmark(options, options.num_runs, main,
                       options.depths, options.threads, options.debug)
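A note on the parse_depths callback above: optparse callbacks receive (option, opt_str, value, parser) and must store the parsed result on parser.values themselves, and because the option is declared with type="string" the resulting list holds strings rather than ints. A small self-contained check of that behaviour:

import optparse

def parse_depths(option, opt_str, value, parser):
    # Split a comma-separated list, dropping empty items ("4,,6" -> ['4', '6']).
    parser.values.depths = [v for v in value.split(',') if v]

parser = optparse.OptionParser()
parser.add_option('--depths', default=[], type="string",
                  action="callback", callback=parse_depths)
options, args = parser.parse_args(['--depths', '4,6,8'])
print(options.depths)  # ['4', '6', '8'] -- strings, not ints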
Example No. 10
def bench(argv):
    parser = optparse.OptionParser(
        usage="%prog [pickle|unpickle] [options]",
        description=("Test the performance of pickling."))
    parser.add_option("--use_cpickle",
                      action="store_true",
                      help="Use the C version of pickle.")
    parser.add_option("--protocol",
                      action="store",
                      default=2,
                      type="int",
                      help="Which protocol to use (0, 1, 2).")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args(argv)

    benchmarks = [
        "pickle", "unpickle", "pickle_list", "unpickle_list", "pickle_dict"
    ]
    for bench_name in benchmarks:
        if bench_name in args:
            benchmark = globals()["test_" + bench_name]
            break
    else:
        raise RuntimeError("Need to specify one of %s" % benchmarks)

    if options.use_cpickle:
        num_obj_copies = 8000
        import cPickle as pickle
    else:
        num_obj_copies = 200
        import pickle

    if options.protocol > 0:
        num_obj_copies *= 2  # Compensate for faster protocols.

    util.run_benchmark(options, num_obj_copies, benchmark, pickle, options)
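The test_* functions looked up through globals() sit outside this excerpt. Purely to illustrate the calling convention (run_benchmark passing num_obj_copies, the chosen pickle module and options through to the benchmark), here is a hypothetical sketch of such a function; the payload and structure of the real bm_pickle.py benchmarks differ:

import time

def test_pickle(num_obj_copies, pickle, options):
    # Hypothetical payload; the real benchmark pickles a larger mixed structure.
    payload = {"key_%d" % i: list(range(10)) for i in range(100)}
    times = []
    for _ in range(options.num_runs):
        t0 = time.time()
        for _ in range(num_obj_copies):
            pickle.dumps(payload, options.protocol)
        t1 = time.time()
        times.append(t1 - t0)
    return times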
Example No. 11

if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [pickle|unpickle] [options]", description=("Test the performance of pickling.")
    )
    parser.add_option("--use_cpickle", action="store_true", help="Use the C version of pickle.")
    parser.add_option("--protocol", action="store", default=2, type="int", help="Which protocol to use (0, 1, 2).")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    benchmarks = ["pickle", "unpickle", "pickle_list", "unpickle_list", "pickle_dict"]
    for bench_name in benchmarks:
        if bench_name in args:
            benchmark = globals()["test_" + bench_name]
            break
    else:
        raise RuntimeError("Need to specify one of %s" % benchmarks)

    if options.use_cpickle:
        num_obj_copies = 8000
        import cPickle as pickle
    else:
        num_obj_copies = 200
        import pickle

    if options.protocol > 0:
        num_obj_copies *= 2  # Compensate for faster protocols.

    util.run_benchmark(options, num_obj_copies, benchmark, pickle, options)
Example No. 12
    """
    cols = list(range(queen_count))
    for vec in permutations(cols):
        if (queen_count == len({ vec[i]+i for i in cols })
                        == len({ vec[i]-i for i in cols })):
            yield vec


def test_n_queens(iterations):
    # Warm-up runs.
    list(n_queens(8))
    list(n_queens(8))

    times = []
    for _ in _xrange(iterations):
        t0 = time()
        list(n_queens(8))
        t1 = time()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of an N-Queens solvers."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_n_queens)
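The permutation trick above encodes a board as vec, where row i holds a queen in column vec[i]; a permutation is a solution exactly when the vec[i]+i and vec[i]-i diagonals are all distinct. A self-contained copy for quick experimentation (same logic as the snippet, redefined here so it runs on its own):

from itertools import permutations

def n_queens(queen_count):
    """Yield each solution as a tuple: index = row, value = column."""
    cols = range(queen_count)
    for vec in permutations(cols):
        if (queen_count == len({vec[i] + i for i in cols})
                        == len({vec[i] - i for i in cols})):
            yield vec

print(len(list(n_queens(8))))  # 92 solutions on the classic 8x8 board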
Example No. 13
    times = []
    for _ in xrange(max(1, count // 2)):
        t0 = timer()
        # Do something simple with each path.
        for p in base_path:
            p.st_mtime
        for p in base_path.glob("*.py"):
            p.st_mtime
        for p in base_path:
            p.st_mtime
        for p in base_path.glob("*.py"):
            p.st_mtime
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of pathlib operations."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    setup()
    try:
        util.run_benchmark(options, options.num_runs, test_pathlib)
    finally:
        teardown()
Example No. 14
def test_list_unpacking(iterations, timer):
    x = list(range(10))

    return do_unpacking(iterations, timer, x)


def test_all(iterations, timer):
    tuple_data = test_tuple_unpacking(iterations, timer)
    list_data = test_list_unpacking(iterations, timer)
    return [x + y for (x, y) in zip(tuple_data, list_data)]


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options] [test]",
        description=("Test the performance of sequence unpacking."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    tests = {"tuple": test_tuple_unpacking, "list": test_list_unpacking}

    if len(args) > 1:
        parser.error("Can only specify one test")
    elif len(args) == 1:
        func = tests.get(args[0])
        if func is None:
            parser.error("Invalid test name")
        util.run_benchmark(options, options.num_runs, func)
    else:
        util.run_benchmark(options, options.num_runs, test_all)
Example No. 15
def run(geo_mean, num_runs):
    return util.run_benchmark(geo_mean, num_runs, test_regex_effbot)
Example No. 16
# Python imports
import optparse
import time

# Local imports
import richards
import util


def test_richards(iterations):
    # Warm-up
    r = richards.Richards()
    r.run(iterations=2)

    times = []
    for _ in xrange(iterations):
        t0 = time.time()
        r.run(iterations=1)
        t1 = time.time()
        times.append(t1 - t0)
    return times

if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the Richards benchmark")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_richards)
Example No. 17
        for thread in threads:
            thread.join()
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options] benchmark_name",
        description="Test the performance of Python's threads.")
    parser.add_option("--num_threads", action="store", type="int", default=2,
                      dest="num_threads", help="Number of threads to test.")
    parser.add_option("--check_interval", action="store", type="int",
                      default=sys.getcheckinterval(),
                      dest="check_interval",
                      help="Value to pass to sys.setcheckinterval().")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    if len(args) != 1:
        parser.error("incorrect number of arguments")

    bm_name = args[0].lower()
    func = globals().get("test_" + bm_name)
    if not func:
        parser.error("unknown benchmark: %s" % bm_name)

    sys.setcheckinterval(options.check_interval)
    util.run_benchmark(options, options.num_runs, func, options.num_threads)
Example No. 18
def test_list_unpacking(iterations, timer):
    x = list(range(10))

    return do_unpacking(iterations, timer, x)


def test_all(iterations, timer):
    tuple_data = test_tuple_unpacking(iterations, timer)
    list_data = test_list_unpacking(iterations, timer)
    return [x + y for (x, y) in zip(tuple_data, list_data)]


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options] [test]",
        description=("Test the performance of sequence unpacking."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    tests = {"tuple": test_tuple_unpacking, "list": test_list_unpacking}

    if len(args) > 1:
        parser.error("Can only specify one test")
    elif len(args) == 1:
        func = tests.get(args[0])
        if func is None:
            parser.error("Invalid test name")
        util.run_benchmark(options, options.num_runs, func)
    else:
        util.run_benchmark(options, options.num_runs, test_all)
Example No. 19
def test_spambayes(iterations, timer, messages, ham_classifier):
    # Prime the pump. This still leaves some hot functions uncompiled; these
    # will be noticed as hot during the timed loops below.
    for msg in messages:
        ham_classifier.score(msg)

    times = []
    for _ in xrange(iterations):
        t0 = timer()
        for msg in messages:
            ham_classifier.score(msg)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Run the SpamBayes benchmark."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    data_dir = os.path.join(os.path.dirname(__file__), "data")
    mailbox = os.path.join(data_dir, "spambayes_mailbox")
    ham_data = os.path.join(data_dir, "spambayes_hammie.pkl")
    msgs = list(mboxutils.getmbox(mailbox))
    ham_classifier = hammie.open(ham_data, "pickle", "r")
    util.run_benchmark(options, options.num_runs, test_spambayes,
                       msgs, ham_classifier)
Example No. 20
    host, port = make_http_server(loop, make_application())
    url = "http://%s:%s/" % (host, port)
    times = []

    @coroutine
    def main():
        client = AsyncHTTPClient()
        for i in xrange(count):
            t0 = timer()
            futures = [client.fetch(url) for j in xrange(CONCURRENCY)]
            for fut in futures:
                resp = yield fut
                buf = resp.buffer
                buf.seek(0, 2)
                assert buf.tell() == len(CHUNK) * NCHUNKS
            t1 = timer()
            times.append(t1 - t0)

    loop.run_sync(main)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of HTTP requests with Tornado."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_tornado)
Example No. 21
    """
    cols = range(queen_count)
    for vec in permutations(cols):
        if (queen_count == len(set(vec[i] + i for i in cols)) == len(
                set(vec[i] - i for i in cols))):
            yield vec


def test_n_queens(iterations, timer):
    # Warm-up runs.
    list(n_queens(8))
    list(n_queens(8))

    times = []
    for _ in xrange(iterations):
        t0 = timer()
        list(n_queens(8))
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of an N-Queens solvers."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_n_queens)
Example No. 22
    v[0] = px / m
    v[1] = py / m
    v[2] = pz / m


def test_nbody(iterations):
    # Warm-up runs.
    report_energy()
    advance(0.01, 20000)
    report_energy()

    times = []
    for _ in range(iterations):
        t0 = time()
        report_energy()
        advance(0.01, 20000)
        report_energy()
        t1 = time()
        times.append(t1 - t0)
    return times


if __name__ == '__main__':
    parser = optparse.OptionParser(usage="%prog [options]",
                                   description=("Run the n-body benchmark."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    offset_momentum(BODIES['sun'])  # Set up global state
    util.run_benchmark(options, options.num_runs, test_nbody)
Example No. 23
    # Warm up.
    for size in sizes:
        run_benchmarks(size)

    times = []
    for i in xrange(iterations):
        t0 = timer()
        for size in sizes:
            run_benchmarks(size)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == '__main__':
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of regexps using Fredik Lundh's "
                     "benchmarks."))
    parser.add_option("-B",
                      "--force_bytes",
                      action="store_true",
                      help="Force testing bytes regexps under 3.x.")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    if options.force_bytes:
        globals()['USE_BYTES_IN_PY3K'] = True
    util.run_benchmark(options, options.num_runs, test_regex_effbot)
Example No. 24
                break
        # Assemble
        left.right = current.left
        right.left = current.right
        current.left = dummy.right
        current.right = dummy.left
        self._root = current


class Node:
    def __init__(self, key, value):
        self.key = key
        self.value = value
        self.left = None
        self.right = None

    def _traverse(self, f):
        current = self
        while current is not None:
            left = current.left
            if left is not None:
                left._traverse(f)
            f(current)
            current = current.right


if __name__ == "__main__":
    tree = splay_setup()
    run_benchmark(lambda: splay_run(tree))
    splay_tear_down(tree)
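Node._traverse above recurses into the left subtree, visits the node, then walks right iteratively, which amounts to an in-order traversal on a binary search tree. A tiny usage sketch reusing the Node class from this excerpt (hypothetical three-node tree, not part of the benchmark):

root = Node(2, "b")
root.left = Node(1, "a")
root.right = Node(3, "c")

keys = []
root._traverse(lambda node: keys.append(node.key))
print(keys)  # [1, 2, 3] -- keys come out in sorted (in-order) order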
Example No. 25
        bm_regex_v8.test_regex_v8(1)
    finally:
        re.compile = real_compile
        re.search = real_search
        re.sub = real_sub
    return regexes


def test_regex_compile(count):
    re._cache = EmptyCache()
    regexes = capture_regexes()
    times = []

    for _ in xrange(count):
        t0 = time.time()
        for regex, flags in regexes:
            re.compile(regex, flags)
        t1 = time.time()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test regex compilation performance"))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_regex_compile)
Example No. 26
    # train it with some patterns
    n.train(pat, 5000)
    # test it
    #n.test(pat)

def time(fn, *args):
    import time, traceback
    begin = time.time()
    result = fn(*args)
    end = time.time()
    return result, end-begin

def test_bpnn(iterations):
    times = []
    for _ in range(iterations):
        result, t = time(demo)
        times.append(t)
    return times

main = test_bpnn

if __name__ == "__main__":
    import optparse
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of a neural network."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_bpnn)
Example No. 27
    if not args:
        bench_func = bench_parse
    elif args[0] not in benchmarks:
        raise RuntimeError("invalid benchmark requested")
    else:
        bench_func = globals()['bench_%s' % args[0]]

    if options.no_accelerator and sys.version_info >= (3, 3):
        # prevent C accelerator from being used in 3.3
        sys.modules['_elementtree'] = None
        import xml.etree.ElementTree as et
        if et.SubElement.__module__ != 'xml.etree.ElementTree':
            raise RuntimeError("Unexpected C accelerator for ElementTree")

    try:
        from importlib import import_module
    except ImportError:
        def import_module(module_name):
            __import__(module_name)
            return sys.modules[module_name]

    try:
        etree_module = import_module(options.etree_module)
    except ImportError:
        if options.etree_module != default_etmodule:
            raise
        etree_module = import_module(fallback_etmodule)

    util.run_benchmark(options, options.num_runs,
                       run_etree_benchmark, etree_module, bench_func)
Example No. 28
def report_energy(bodies=SYSTEM, pairs=PAIRS, e=0.0):
    for (((x1, y1, z1), v1, m1), ((x2, y2, z2), v2, m2)) in pairs:
        dx = x1 - x2
        dy = y1 - y2
        dz = z1 - z2
        e -= (m1 * m2) / ((dx * dx + dy * dy + dz * dz)**0.5)
    for (r, [vx, vy, vz], m) in bodies:
        e += m * (vx * vx + vy * vy + vz * vz) / 2.


def offset_momentum(ref, bodies=SYSTEM, px=0.0, py=0.0, pz=0.0):
    for (r, [vx, vy, vz], m) in bodies:
        px -= vx * m
        py -= vy * m
        pz -= vz * m
    (r, v, m) = ref
    v[0] = px / m
    v[1] = py / m
    v[2] = pz / m


def nbody():
    offset_momentum(BODIES['sun'])
    report_energy()
    advance(0.01, NUMBER_OF_ITERATIONS)
    report_energy()


if __name__ == "__main__":
    run_benchmark(nbody)
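offset_momentum assigns the reference body (the sun) a velocity that cancels the summed momentum of all bodies, so the system's net momentum is zero and the centre of mass stays put during advance. A quick self-contained check with toy two-body data (not the benchmark's BODIES table; the helper is redefined here without the default arguments):

def offset_momentum(ref, bodies):
    px = py = pz = 0.0
    for (r, (vx, vy, vz), m) in bodies:
        px -= vx * m
        py -= vy * m
        pz -= vz * m
    (r, v, m) = ref
    v[0] = px / m
    v[1] = py / m
    v[2] = pz / m

sun = ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0], 10.0)
planet = ([1.0, 0.0, 0.0], [0.0, 2.0, 0.0], 1.0)
offset_momentum(sun, [sun, planet])
net = [sum(v[i] * m for (_, v, m) in [sun, planet]) for i in range(3)]
print(net)  # each component is (numerically) zero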
Example No. 29
<tr>{% for col in row %}<td>{{ col|escape }}</td>{% endfor %}</tr>
{% endfor %}
</table>
""")

def test_django(count, timer):
    table = [xrange(150) for _ in xrange(150)]
    context = Context({"table": table})

    # Warm up Django.
    DJANGO_TMPL.render(context)
    DJANGO_TMPL.render(context)

    times = []
    for _ in xrange(count):
        t0 = timer()
        data = DJANGO_TMPL.render(context)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of Django templates."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_django)
Example No. 30
def run(num_runs=100, take_geo_mean=True):
    return util.run_benchmark(take_geo_mean, num_runs, test_calls)
Example No. 31
def run(num_runs=100, take_geo_mean=True):
    return util.run_benchmark(take_geo_mean, num_runs, test_regex_compile)
Example No. 32
def run(num_runs=100, take_geo_mean=True):
    return util.run_benchmark(take_geo_mean, num_runs, test_calls)
Example No. 33

def test_html5lib(count, spec_data):
    # No warm-up runs for this benchmark; in real life, the parser doesn't get
    # to warm up (this isn't a daemon process).

    times = []
    for _ in xrange(count):
        spec_data.seek(0)
        t0 = time.time()
        html5lib.parse(spec_data)
        t1 = time.time()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of the html5lib parser."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    # Get all our IO over with early.
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    spec_filename = os.path.join(data_dir, "html5lib_spec.html")
    with open(spec_filename) as spec_fh:
        spec_data = StringIO.StringIO(spec_fh.read())

    util.run_benchmark(options, options.num_runs, test_html5lib, spec_data)
Example No. 34
if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options] benchmark_name",
        description="Test the performance of Python's threads.")
    parser.add_option("--num_threads",
                      action="store",
                      type="int",
                      default=2,
                      dest="num_threads",
                      help="Number of threads to test.")
    parser.add_option("--check_interval",
                      action="store",
                      type="int",
                      default=sys.getcheckinterval(),
                      dest="check_interval",
                      help="Value to pass to sys.setcheckinterval().")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    if len(args) != 1:
        parser.error("incorrect number of arguments")

    bm_name = args[0].lower()
    func = globals().get("test_" + bm_name)
    if not func:
        parser.error("unknown benchmark: %s" % bm_name)

    sys.setcheckinterval(options.check_interval)
    util.run_benchmark(options, options.num_runs, func, options.num_threads)
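This threading harness (and the one in Example No. 17) relies on sys.getcheckinterval() / sys.setcheckinterval(), which were deprecated in Python 3.2 and removed in 3.9 in favour of the time-based sys.setswitchinterval(). A rough compatibility shim if you want to run the harness on a current interpreter; the bytecode-count to seconds mapping below is only a guess:

import sys

if not hasattr(sys, "setcheckinterval"):
    def _setcheckinterval(n):
        # Assumed mapping: treat each "check" as roughly a microsecond of work.
        sys.setswitchinterval(max(int(n), 1) * 1e-6)

    def _getcheckinterval():
        return int(sys.getswitchinterval() * 1e6)

    sys.setcheckinterval = _setcheckinterval
    sys.getcheckinterval = _getcheckinterval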
Example No. 35
                y = extract(z, 3)
                while y != extract(z, 4):
                    z = compose(z, next(x))
                    y = extract(z, 3)
                z = compose((10, -10*y, 0, 1), z)
                yield y

        return list(_islice(pi_digits(), n))

    # Warm-up runs.
    calc_ndigits(NDIGITS)
    calc_ndigits(NDIGITS)

    times = []
    for _ in xrange(iterations):
        t0 = timer()
        calc_ndigits(NDIGITS)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of pi calculation."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_pidgits)
Example No. 36
import bigtable

# bummer, timeit module is stupid
from bigtable import test_python_cstringio, test_spitfire_o4, test_spitfire


def runtest(n, benchmark):
    times = []
    for i in range(n):
        sys.stdout = StringIO()
        bigtable.run([benchmark], 100)
        times.append(float(sys.stdout.getvalue().split(" ")[-2]))
        sys.stdout = sys.__stdout__
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]", description="Test the performance of the spitfire benchmark"
    )
    parser.add_option(
        "--benchmark",
        type="choice",
        choices=["python_cstringio", "spitfire_o4"],
        default="spitfire_o4",
        help="choose between cstringio and spitfire_o4",
    )
    util.add_standard_options_to(parser)
    options, args = parser.parse_args(sys.argv)
    util.run_benchmark(options, options.num_runs, runtest, options.benchmark)
Example No. 37
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        f.foo(1, 2, 3, 4)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options] [test]",
        description=("Test the performance of simple Python-to-Python method"
                     " calls."))
    util.add_standard_options_to(parser)
    options, _ = parser.parse_args()

    # Priming run.
    test_calls(1, time.time)

    util.run_benchmark(options, options.num_runs, test_calls)
Example No. 38
        re.search(regexs[id], string_tables[n][id])
        re.search(regexs[id], string_tables[n][id])


def test_regex_effbot(iterations):
    sizes = init_benchmarks()

    # Warm up.
    for size in sizes:
        run_benchmarks(size)

    times = []
    for i in xrange(iterations):
        t0 = time.time()
        for size in sizes:
            run_benchmarks(size)
        t1 = time.time()
        times.append(t1 - t0)
    return times


if __name__ == '__main__':
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of regexps using Fredik Lundh's "
                     "benchmarks."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_regex_effbot)
Example No. 39
    % endfor
</tr>
% endfor
</table>
""")

def test_mako(count, timer):
    table = [xrange(150) for _ in xrange(150)]

    # Warm up Mako.
    MAKO_TMPL.render(table = table)
    MAKO_TMPL.render(table = table)

    times = []
    for _ in xrange(count):
        t0 = timer()
        MAKO_TMPL.render(table = table)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of Mako templates."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_mako)
Example No. 40
        tmpl.render(context)
        tmpl.render(context)
        tmpl.render(context)
        tmpl.render(context)
        tmpl.render(context)
        tmpl.render(context)
        tmpl.render(context)
        tmpl.render(context)
        tmpl.render(context)
        tmpl.render(context)
        tmpl.render(context)
        tmpl.render(context)
        tmpl.render(context)
        tmpl.render(context)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    setup()
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of Django templates using "
                     "Rietveld's front page template."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    tmpl, context = get_benchmark_data()
    util.run_benchmark(options, options.num_runs, test_rietveld, tmpl, context)
Example No. 41
def run(geo_mean=True, num_runs=10):
    return util.run_benchmark(geo_mean, num_runs, test_nbody)
Example No. 42
    times = []
    for _ in xrange(max(1, count // 2)):
        t0 = timer()
        # Do something simple with each path.
        for p in base_path:
            p.st_mtime
        for p in base_path.glob("*.py"):
            p.st_mtime
        for p in base_path:
            p.st_mtime
        for p in base_path.glob("*.py"):
            p.st_mtime
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of pathlib operations."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    setup()
    try:
        util.run_benchmark(options, options.num_runs, test_pathlib)
    finally:
        teardown()
Example No. 43
  3 4 4 . 3 
 2 . . 3 4 3 
2 . 1 . 3 . 2 
 3 3 . 2 . 2 
  3 . 2 . 2 
   2 2 . 1 
"""
    if output.getvalue() != expected:
        raise AssertionError("got a wrong answer:\n%s" % output.getvalue())

def main(n, timer):
    # only run 1/25th of the requested number of iterations.
    # with the default n=50 from runner.py, this means twice.
    l = []
    for i in xrange(n):
        t0 = timer()
        run_level36()
        time_elapsed = timer() - t0
        l.append(time_elapsed)
    return l

if __name__ == "__main__":
    import util, optparse
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the hexiom2 benchmark")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, 1, main)
Example No. 44
            GVector(1.662000, 4.360280, 0.000000)],
            3, [0, 0, 0, 1, 1, 1, 2, 2, 2]),
        Spline([
            GVector(2.804500, 4.017350, 0.000000),
            GVector(2.550500, 3.525230, 0.000000),
            GVector(1.979010, 2.620360, 0.000000),
            GVector(1.979010, 2.620360, 0.000000)],
            3, [0, 0, 0, 1, 1, 1]),
        Spline([
            GVector(2.001670, 4.011320, 0.000000),
            GVector(2.335040, 3.312830, 0.000000),
            GVector(2.366800, 3.233460, 0.000000),
            GVector(2.366800, 3.233460, 0.000000)],
            3, [0, 0, 0, 1, 1, 1])
        ]
    c = Chaosgame(splines, 0.25)
    return c.create_image_chaos(timer, 1000, 1200, n)



if __name__ == "__main__":
    import util
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the Chaos benchmark")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, main)

Example No. 45
def time(fn, *args):
    from time import time
    begin = time()
    result = await_one(fn(*args))
    end = time()
    return result, end-begin


def benchmark(N):
    times = []
    for _ in range(N):
        result, t = time(bm_await_nested, 1000)
        times.append(t)
        assert result == 8221043302, result
    return times


main = benchmark

if __name__ == "__main__":
    import optparse
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Micro benchmarks for generators.")

    import util
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, benchmark)
Example No. 46
    host, port = make_http_server(loop, make_application())
    url = "http://%s:%s/" % (host, port)
    times = []

    @coroutine
    def main():
        client = AsyncHTTPClient()
        for i in xrange(count):
            t0 = timer()
            futures = [client.fetch(url) for j in xrange(CONCURRENCY)]
            for fut in futures:
                resp = yield fut
                buf = resp.buffer
                buf.seek(0, 2)
                assert buf.tell() == len(CHUNK) * NCHUNKS
            t1 = timer()
            times.append(t1 - t0)

    loop.run_sync(main)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of HTTP requests with Tornado."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_tornado)
Example No. 47
            json.loads(json_dict_group)
            json.loads(json_dict_group)
            json.loads(json_dict_group)
            json.loads(json_dict_group)
            json.loads(json_dict_group)
            json.loads(json_dict_group)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [json_dump|json_load] [options]",
        description=("Test the performance of JSON (de)serializing."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    benchmarks = ["json_dump", "json_load"]
    for bench_name in benchmarks:
        if bench_name in args:
            benchmark = globals()["test_" + bench_name]
            break
    else:
        raise RuntimeError("Need to specify one of %s" % benchmarks)

    num_obj_copies = 8000
    import json

    util.run_benchmark(options, num_obj_copies, benchmark, json, options)
Example No. 48
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        foo(1, 2, 3, 4)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options] [test]",
        description=("Test the performance of simple Python-to-Python function"
                     " calls."))
    util.add_standard_options_to(parser)
    options, _ = parser.parse_args()

    # Priming run.
    test_calls(1, time.time)

    util.run_benchmark(options, options.num_runs, test_calls)
Example No. 49
        usage="%prog [no_output|simple_output|formatted_output] [options]",
        description=("Test the performance of logging."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    benchmarks = ["no_output", "simple_output", "formatted_output"]
    for bench_name in benchmarks:
        if bench_name in args:
            benchmark = globals()["test_" + bench_name]
            break
    else:
        raise RuntimeError("Need to specify one of %s" % benchmarks)

    # NOTE: StringIO performance will impact the results...
    if sys.version_info >= (3,):
        sio = io.StringIO()
    else:
        sio = io.BytesIO()
    handler = logging.StreamHandler(stream=sio)
    logger = logging.getLogger("benchlogger")
    logger.propagate = False
    logger.addHandler(handler)
    logger.setLevel(logging.WARNING)

    util.run_benchmark(options, options.num_runs, benchmark, logger)

    if benchmark is not test_no_output:
        assert len(sio.getvalue()) > 0
    else:
        assert len(sio.getvalue()) == 0
Example No. 50
    lookup.put_string('base.mako', BASE_TEMPLATE)
    lookup.put_string('page.mako', PAGE_TEMPLATE)

    template = Template(CONTENT_TEMPLATE, lookup=lookup)

    table = [xrange(150) for i in xrange(150)]
    paragraphs = xrange(50)
    title = 'Hello world!'

    times = []
    for i in range(count):
        t0 = time.time()
        data = template.render(table=table,
                               paragraphs=paragraphs,
                               lorem=LOREM_IPSUM,
                               title=title,
                               img_count=50)
        t1 = time.time()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of Mako templates."))
    util.add_standard_options_to(parser)
    (options, args) = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_mako)
Example No. 51
{% endfor %}
</table>
""")


def test_django(count, timer):
    table = [xrange(150) for _ in xrange(150)]
    context = Context({"table": table})

    # Warm up Django.
    DJANGO_TMPL.render(context)
    DJANGO_TMPL.render(context)

    times = []
    for _ in xrange(count):
        t0 = timer()
        data = DJANGO_TMPL.render(context)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of Django templates."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_django)
Example No. 52
2 . 1 . 3 . 2 
 3 3 . 2 . 2 
  3 . 2 . 2 
   2 2 . 1 
"""
    if output.getvalue() != expected:
        raise AssertionError("got a wrong answer:\n%s" % output.getvalue())


def main(n):
    # only run 1/25th of the requested number of iterations.
    # with the default n=50 from runner.py, this means twice.
    l = []
    for i in range(n):
        t0 = time.time()
        run_level36()
        time_elapsed = time.time() - t0
        l.append(time_elapsed)
    return l


if __name__ == "__main__":
    import util, optparse
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the hexiom2 benchmark")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, main)
Example No. 53
    v[1] = py / m
    v[2] = pz / m


def test_nbody(iterations):
    # Warm-up runs.
    report_energy()
    advance(0.01, 20000)
    report_energy()

    times = []
    for _ in range(iterations):
        t0 = time()
        report_energy()
        advance(0.01, 20000)
        report_energy()
        t1 = time()
        times.append(t1 - t0)
    return times


if __name__ == '__main__':
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Run the n-body benchmark."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    offset_momentum(BODIES['sun'])  # Set up global state
    util.run_benchmark(options, options.num_runs, test_nbody)