Example #1
def bench(argv):
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of simple AI solvers."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args(argv)

    util.run_benchmark(options, options.num_runs, test_n_queens)
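
All of these examples share the same shape: a small bench()/entry_point() wrapper builds an optparse.OptionParser, lets a local util module attach the standard benchmark options, and hands a test function to util.run_benchmark. The snippets assume module-level "import optparse" and "import util" (Example #7 imports them inside the function instead), and the util module itself is not shown. The sketch below is a hypothetical, minimal version of what it is assumed to provide; the option name, signatures, and output format are assumptions, not the original implementation (some examples, such as test_mako in Example #10, evidently also receive a timer callable from their harness).

# util.py -- hypothetical minimal sketch of the helper module the examples assume.
import optparse

def add_standard_options_to(parser):
    # Assumed to add at least an iteration-count option, since every example
    # reads options.num_runs after parsing.
    parser.add_option("-n", "--num_runs", action="store", type="int",
                      default=100, dest="num_runs",
                      help="Number of times to run the test.")

def run_benchmark(options, num_runs, bench_func, *args):
    # Assumed to call the benchmark function with the iteration count (plus any
    # extra positional arguments) and report the per-iteration times it returns.
    times = bench_func(num_runs, *args)
    for t in times:
        print(t)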
Example #2
def bench(argv):
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the Richards benchmark")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args(argv)

    util.run_benchmark(options, options.num_runs, test_richards)
Example #3
def bench(argv):
    parser = optparse.OptionParser(usage="%prog [options]",
                                   description=("Run the n-body benchmark."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args(argv)

    offset_momentum(BODIES['sun'])  # Set up global state
    util.run_benchmark(options, options.num_runs, test_nbody)
Example #4
def bench(argv):
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of regexps using Fredik Lundh's "
                     "benchmarks."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args(argv)

    util.run_benchmark(options, options.num_runs, test_regex_effbot)
Example #5
def bench(argv):
    parser = optparse.OptionParser(
        usage="%prog [options] [test]",
        description=("Test the performance of simple Python-to-Python function"
                     " calls."))
    util.add_standard_options_to(parser)
    options, _ = parser.parse_args(argv)

    # Priming run.
    test_calls(1)

    util.run_benchmark(options, options.num_runs, test_calls)
Example #6
def bench(argv):
    parser = optparse.OptionParser(
        usage="%prog [options] [test]",
        description=("Test the performance of sequence unpacking."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args(argv)

    tests = {"tuple": test_tuple_unpacking, "list": test_list_unpacking}

    if len(args) > 1:
        parser.error("Can only specify one test")
    elif len(args) == 1:
        func = tests.get(args[0])
        if func is None:
            parser.error("Invalid test name")
        util.run_benchmark(options, options.num_runs, func)
    else:
        util.run_benchmark(options, options.num_runs, test_all)
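
test_tuple_unpacking, test_list_unpacking, and test_all are defined elsewhere in the original file. To illustrate the convention the harness expects (an iteration count in, a list of per-iteration timings out), here is a hypothetical version of the tuple test; the body is an assumption, not the original code.

import time

def test_tuple_unpacking(iterations):
    # Hypothetical body: repeatedly unpack a fixed tuple and time each pass.
    times = []
    for _ in range(iterations):
        t0 = time.time()
        for _ in range(1000):
            a, b, c, d, e, f, g, h, i, j = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        times.append(time.time() - t0)
    return times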
Example #7
def entry_point(argv):
    import optparse
    import util

    def parse_depths(option, opt_str, value, parser):
        parser.values.depths = [v for v in value.split(',') if v]
    
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the garbage collector benchmark")
    util.add_standard_options_to(parser)
    parser.add_option('--threads', default=0, type="int", action="store",
                      help="number of threads to use (default 0)")
    parser.add_option('--depths', default=DEFAULT_DEPTHS, type="string",
                      action="callback", callback=parse_depths,
                      help='tree depths')
    parser.add_option('--debug', default=False, action='store_true',
                      help="enable debugging")
    options, args = parser.parse_args(argv)
    util.run_benchmark(options, options.num_runs, main,
                       options.depths, options.threads, options.debug)
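
The --depths option relies on an optparse callback to turn a comma-separated string into a list; DEFAULT_DEPTHS is defined elsewhere in the original file. A standalone, hypothetical demonstration of the same callback pattern:

import optparse

def parse_depths(option, opt_str, value, parser):
    parser.values.depths = [v for v in value.split(',') if v]

demo = optparse.OptionParser()
demo.add_option('--depths', default='18', type="string",
                action="callback", callback=parse_depths,
                help='tree depths')
opts, _ = demo.parse_args(['--depths', '3,6,9'])
print(opts.depths)  # ['3', '6', '9'] -- note the values stay strings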
Example #8
def bench(argv):
    parser = optparse.OptionParser(
        usage="%prog [pickle|unpickle] [options]",
        description=("Test the performance of pickling."))
    parser.add_option("--use_cpickle",
                      action="store_true",
                      help="Use the C version of pickle.")
    parser.add_option("--protocol",
                      action="store",
                      default=2,
                      type="int",
                      help="Which protocol to use (0, 1, 2).")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args(argv)

    benchmarks = [
        "pickle", "unpickle", "pickle_list", "unpickle_list", "pickle_dict"
    ]
    for bench_name in benchmarks:
        if bench_name in args:
            benchmark = globals()["test_" + bench_name]
            break
    else:
        raise RuntimeError("Need to specify one of %s" % benchmarks)

    if options.use_cpickle:
        num_obj_copies = 8000
        import cPickle as pickle
    else:
        num_obj_copies = 200
        import pickle

    if options.protocol > 0:
        num_obj_copies *= 2  # Compensate for faster protocols.

    util.run_benchmark(options, num_obj_copies, benchmark, pickle, options)
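
Unlike the earlier examples, run_benchmark is called here with two extra positional arguments (the chosen pickle module and the parsed options), which are presumably forwarded to the selected test function. A hypothetical test_pickle compatible with that call might look like this; the payload and loop structure are assumptions, not the original code.

import time

def test_pickle(num_obj_copies, pickle, options):
    # Hypothetical body: serialize a fixed payload num_obj_copies times.
    payload = {"ints": list(range(100)), "strs": ["spam"] * 100}
    times = []
    for _ in range(num_obj_copies):
        t0 = time.time()
        pickle.dumps(payload, options.protocol)
        times.append(time.time() - t0)
    return times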
Example #9
2 . 1 . 3 . 2 
 3 3 . 2 . 2 
  3 . 2 . 2 
   2 2 . 1 
"""
    if output.getvalue() != expected:
        raise AssertionError("got a wrong answer:\n%s" % output.getvalue())


def main(n):
    # The harness requests only 1/25th of the usual iteration count for this
    # benchmark; with the default of 50 from runner.py, main() is therefore
    # called with n == 2.
    l = []
    for i in range(n):
        t0 = time.time()
        run_level36()
        time_elapsed = time.time() - t0
        l.append(time_elapsed)
    return l


if __name__ == "__main__":
    import util, optparse
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the hexiom2 benchmark")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, main)
Example #10
    % endfor
</tr>
% endfor
</table>
""")

def test_mako(count, timer):
    table = [xrange(150) for _ in xrange(150)]

    # Warm up Mako.
    MAKO_TMPL.render(table=table)
    MAKO_TMPL.render(table=table)

    times = []
    for _ in xrange(count):
        t0 = timer()
        MAKO_TMPL.render(table=table)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of Mako templates."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_mako)