import SOAPpy
import suds.client

import bench


def benchmark_wam1(url):
    """Benchmark PDF WAM implementation 1 for the time it takes to
    perform the operation."""
    print "Benchmarking PDF-WAM1 (SOAPpy) using", url
    proxy = SOAPpy.SOAPProxy('http://loft2492.serverloft.com:8893', 'eGovMon')
    with bench.bench('getPDFContent'):
        proxy.getPDFContent(url)
    with bench.bench('checkacc'):
        proxy.checkacc(url)


def benchmark_wam2(url):
    """Benchmark PDF WAM implementation 2 for the time it takes to
    perform the operation."""
    print "Benchmarking PDF-WAM2 (tornado) using", url
    proxy = suds.client.Client(
        'http://loft2492.serverloft.com:8894/PdfContentService?wsdl')
    with bench.bench('getPDFContent'):
        proxy.service.getPDFContent(url)
    proxy = suds.client.Client(
        'http://loft2492.serverloft.com:8894/PdfWamService?wsdl')
    with bench.bench('checkacc'):
        print proxy.service.checkacc(url)
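
# For reference, a minimal sketch of the bench.bench(label) context manager
# the two functions above assume: it times the enclosed block and prints the
# label with the elapsed wall-clock time. This is an assumption based on how
# it is called, not the project's actual implementation.
import time
from contextlib import contextmanager


@contextmanager
def bench(label):
    start = time.time()
    try:
        yield
    finally:
        print("%s: %.3f s" % (label, time.time() - start))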
import argparse
import os

# mixturemodel, irm, and the bench/mkdirp/get_next_id helpers are
# project-local modules assumed to be importable alongside this script.


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--results-dir', required=True)
    parser.add_argument('--benchmark', required=True)
    args = parser.parse_args()
    benchmarks = {
        'mixturemodel': (mixturemodel.latent, {
            '--groups': range(10, 101, 10),
            '--entities-per-group': [10, 100],
            '--features': 10,
            '--target-runtime': 10,
        }),
        'irm': (irm.latent, {
            '--groups': range(10, 101, 10),
            '--entities-per-group': [10],
            '--features': 1,
            '--target-runtime': 10,
        }),
    }
    if args.benchmark not in benchmarks:
        raise ValueError("invalid benchmark: {}".format(args.benchmark))
    mkdirp(args.results_dir)

    def format_args(args):
        toks = []
        for k, v in args.iteritems():
            if hasattr(v, '__iter__'):
                for v0 in v:
                    toks.extend([k, str(v0)])
            else:
                toks.extend([k, str(v)])
        return toks

    latent, benchargs = benchmarks[args.benchmark]
    d = os.path.join(args.results_dir, args.benchmark)
    mkdirp(d)
    nextid = get_next_id(d)
    benchargs = format_args(benchargs)
    benchargs.extend(
        ['--output', os.path.join(d, "{id}.json".format(id=nextid))])
    bench(benchargs, latent)
def conflict(entry_point_function, tcfg_map, conflict_files, old_ilp,
             new_ilp, dir_name, sol_file, emit_conflicts=False,
             do_cplex=False, interactive=False, silent_cplex=False,
             preempt_limit=None, default_phantom_preempt=False):
    if preempt_limit is None:
        preempt_limit = 5
    if default_phantom_preempt:
        conflict_files.append(
            convert_loop_bounds.phantomPreemptsAnnoFileName(dir_name))
    # initialise graph_to_graph so we get immFunc
    # load the loop_counts
    print 'conflict.conflict: sol_file %s' % sol_file
    bench.bench(dir_name, entry_point_function, False, True, False,
                parse_only=True)
    # we need the loop data
    immFunc().process()
    global bbAddr
    bbAddr = immFunc().bbAddr
    read_tcfg_map(tcfg_map)
    if interactive:
        assert False, 'Halt'
    if emit_conflicts:
        print 'new_ilp:%s' % new_ilp
        print_constraints(conflict_files, old_ilp, new_ilp, sol_file,
                          preempt_limit)
    if do_cplex:
        cplex_ret = cplex.cplexSolve(new_ilp, silent=silent_cplex,
                                     sol_file=sol_file)
        print 'cplex_ret: %s' % cplex_ret
        return cplex_ret
from bench import bench


def test():
    def task():
        for _ in range(100):
            yield

    t1, t2, t3 = task(), task(), task()
    for _ in range(100):
        next(t1)
        next(t2)
        next(t3)


print(int(bench(10000)))
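
# A plausible sketch of the string-based bench(n, setup, stmt) helper used by
# the timeit-style snippets in this collection: run setup once, time n
# executions of stmt, and return iterations per second. Calls like
# bench(100000, 'fib', '(20)') pass a callable name plus an argument tuple
# instead of source strings; the handling below, and the use of the caller's
# globals, are assumptions rather than the real module's code.
import sys
import timeit


def bench(n, setup='', stmt='pass'):
    if stmt.lstrip().startswith('('):
        # bench(n, 'fib', '(20)') style: join name and arguments into a call.
        stmt, setup = setup + stmt, ''
    seconds = timeit.timeit(stmt=stmt, setup=setup or 'pass', number=n,
                            globals=sys._getframe(1).f_globals)
    return n / seconds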
#!/usr/bin/env python
import bench


class SortedListSet(object):
    # The class header and __init__ are reconstructed from the parallel
    # UnsortedListSet example; the original excerpt began mid-method.
    def __init__(self):
        self.values = []

    def insert(self, value):
        index = self.binary_search(value, self.values, 0, len(self.values))
        # index cannot be larger than len(self.values)
        if index == len(self.values) or value != self.values[index]:
            self.values.insert(index, value)

    def contains(self, value):
        index = self.binary_search(value, self.values, 0, len(self.values))
        return index < len(self.values) and value == self.values[index]

    def binary_search(self, value, l, start, end):
        middle = (start + end) // 2
        if end == start:
            return middle
        elif l[middle] == value:
            return middle
        # We don't need to consider middle anymore because we ruled it
        # out above. So we recurse with middle +/- 1.
        elif l[middle] < value:
            return self.binary_search(value, l, middle + 1, end)
        elif l[middle] > value:
            return self.binary_search(value, l, start, middle)


if __name__ == "__main__":
    print "sortedlistset is the main"
    bench.bench(SortedListSet)
    sys.exit(-1)
else:
    entry_point_function = sys.argv[2]

gen_heads = False
load_counts = False
interactive = False
automated = False
conflict_file = None
dir_name = sys.argv[1]
print 'dir_name: %s' % dir_name
flag = sys.argv[3]
assert flag in ['--l', '--L', '--i', '--x', '--xL']
if flag == '--l':
    gen_heads = True
    bench.bench(dir_name, entry_point_function, gen_heads, load_counts,
                interactive)
    sys.exit(0)
if flag == '--L':
    load_counts = True
if flag == '--i':
    interactive = True
if flag == '--x' or flag == '--xL':
    if len(sys.argv) < 4:
        printHelp()
        sys.exit(-1)
    asm_fs = bench.init(dir_name)
    if flag == '--x':
        import convert_loop_bounds
        analyseFunction(entry_point_function, asm_fs, dir_name,
                        True, False, False)
        print "loop heads generated"
        convert_loop_bounds.convert_loop_bounds(dir_name)
import timeit

import bench
import modular_exp

REPEAT_COUNT = 20

for power_digits in range(1, 7):
    x = 7**400
    power = 10**power_digits
    modulus = 11**300
    print("x 7^400, power 10^%d, modulus 11^300: " % power_digits)
    print('\n'.join(map(lambda x: "%s: time %f s, memory %f MB" % x,
                        bench.bench([modular_exp.square_modular_exp,
                                     modular_exp.generic_modular_exp],
                                    [x, power, modulus], REPEAT_COUNT))))

for power_digits in [7]:
    x = 7**400
    power = 10**power_digits
    modulus = 11**300
    print("x 7^400, power 10^%d, modulus 11^300: " % power_digits)
    print('\n'.join(map(lambda x: "%s: time %f s, memory %f MB" % x,
                        bench.bench([modular_exp.square_modular_exp],
                                    [x, power, modulus], REPEAT_COUNT))))

for power_digits in range(100, 1000, 100):
    x = 7**400
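
# The bench.bench([funcs], args, repeat) variant above apparently yields
# (name, seconds, megabytes) tuples, judging by the "%s: time %f s, memory
# %f MB" format string. A hedged stand-in: best-of-repeat wall time plus the
# peak allocation reported by tracemalloc. The real module may measure memory
# differently.
import time
import tracemalloc


def bench(funcs, args, repeat):
    results = []
    for f in funcs:
        best = float('inf')
        tracemalloc.start()
        for _ in range(repeat):
            t0 = time.time()
            f(*args)
            best = min(best, time.time() - t0)
        _, peak = tracemalloc.get_traced_memory()
        tracemalloc.stop()
        results.append((f.__name__, best, peak / 1e6))
    return results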
from bench import bench

print(bench(100000, '''
def fib(n, a=0, b=1):
    if n > 1:
        return fib(n-1, b, a+b)
    return a if n == 0 else b
''', '''
fib(20)
'''))
from bench import bench

print(
    bench(
        10, '''
def fib(n, a, b):
    return a if n == 0 else b if n == 1 else fib(n-1, b, a+b)
''', '''
for _ in range(10000):
    fib(20, 0, 1)
'''))
from bench import bench

print(
    bench(
        10, '''
class Counter():
    def __init__(self):
        self.n = 0

    def inc(self):
        self.n += 1

    def dec(self):
        self.n -= 1
''', '''
for _ in range(1000):
    c = Counter()
    c.inc()
    c.dec()
'''))
from bench import bench

print(
    bench(
        10, '''
class Counter():
    def __init__(self):
        self.n = 0

    def inc(self, d=1):
        self.n += d
''', '''
for _ in range(10000):
    c = Counter()
    c.inc()
    c.inc(-1)
'''))
from __future__ import print_function

import Pyro4

import bench

obj = bench.bench()
daemon = Pyro4.Daemon()
uri = daemon.register(obj, "example.benchmark")
print("Server running, uri = %s" % uri)
daemon.requestLoop()
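
# The Pyro4 server above registers a bench.bench *object*; a minimal stand-in
# exposing a few cheap methods to time remotely might look like this. The
# method names are illustrative, not the example's actual API, and recent
# Pyro4 versions additionally require @Pyro4.expose on remotely callable
# classes.
import Pyro4


@Pyro4.expose
class bench(object):
    def ping(self):
        pass

    def length(self, string):
        return len(string)

    def timestwo(self, value):
        return value * 2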
from bench import bench

print(bench(10, '', '''
s = []
for i in range(100000):
    s.append(i)
for _ in range(100000):
    s.pop()
'''))
from bench import bench

print(bench(1000000, '', '''
a, (b, (c, (d, (e, (f, g))))) = (1, (2, (3, (4, (5, (6, 7))))))
(a, (b, (c, (d, (e, (f, g))))))
'''))
from bench import bench


def test():
    try:
        raise Exception(42)
    except Exception as e:
        pass


print(int(bench(100000, 'test', '()')))
from bench import bench

print(bench(100000, '''
def fib(n):
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a
        b += a
    return b
''', '''
fib(20)
'''))
from bench import bench


def fib(n):
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a
        b += a
    return b


print(int(bench(100000, 'fib', '(20)')))
def main():
    count = 1024
    func = say_hello
    bench(func, count=count)
import timeit

import bench
import prime_sieve

REPEAT_COUNT = 1

for upper_bound_digits in range(7, 9):
    upper_bound = 10**upper_bound_digits
    print("upper_bound %d: " % upper_bound)
    print('\n'.join(map(lambda x: "%s: time %f s, memory %f MB" % x,
                        bench.bench([prime_sieve.eratosthenes,
                                     prime_sieve.euler],
                                    [upper_bound], REPEAT_COUNT))))
for f in funcs:
    sys.stdout.write("%d times %s " % (iters, f.__name__))
    voor = time.time()
    for i in range(iters):
        f()
    sys.stdout.write("%.3f\n" % (time.time() - voor))
    sys.stdout.flush()

duration = time.time() - begin
print('total time %.3f seconds' % duration)
amount = len(funcs) * iters
print('total method calls: %d' % (amount))
avg_pyro_msec = 1000.0 * duration / amount
print('avg. time per method call: %.3f msec (%d/sec)'
      % (avg_pyro_msec, amount / duration))

print('-------- BENCHMARK LOCAL OBJECT ---------')
object = bench.bench()
begin = time.time()
iters = 200000
for f in funcs:
    sys.stdout.write("%d times %s " % (iters, f.__name__))
    voor = time.time()
    for i in range(iters):
        f()
    sys.stdout.write("%.3f\n" % (time.time() - voor))
    sys.stdout.flush()

duration = time.time() - begin
print('total time %.3f seconds' % duration)
amount = len(funcs) * iters
print('total method calls: %d' % (amount))
avg_normal_msec = 1000.0 * duration / amount
print('avg. time per method call: %.3f msec (%d/sec)'
      % (avg_normal_msec, amount / duration // 1000 * 1000))
from bench import bench


def fib(n, a, b):
    return a if n == 0 else b if n == 1 else fib(n - 1, b, a + b)


print(int(bench(10000, 'fib', '(20, 0, 1)')))
def main():
    count = 1024
    func = say_hello_generic_way
    bench(func, count=count)
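
# Both main() variants above call bench(func, count=...); a minimal sketch of
# such a helper, assuming it simply repeats the call and reports the rate:
import time


def bench(func, count=1):
    start = time.time()
    for _ in range(count):
        func()
    elapsed = time.time() - start
    print("%s: %d calls in %.3f s (%.0f calls/s)"
          % (func.__name__, count, elapsed, count / elapsed))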
iters = 1000
for f in funcs:
    sys.stdout.write("%d times %s " % (iters, f.__name__))
    voor = time.time()
    for i in range(iters):
        f()
    sys.stdout.write("%.4f\n" % (time.time() - voor))
    sys.stdout.flush()

duration = time.time() - begin
print('total time %.4f seconds' % duration)
print('total method calls: %d' % (len(funcs) * iters))
avg_pyro_msec = 1000.0 * duration / (len(funcs) * iters)
print('avg. time per method call: %.4f msec' % avg_pyro_msec)

print('-------- BENCHMARK LOCAL OBJECT ---------')
object = bench.bench()
begin = time.time()
iters = 200000
for f in funcs:
    sys.stdout.write("%d times %s " % (iters, f.__name__))
    voor = time.time()
    for i in range(iters):
        f()
    sys.stdout.write("%.4f\n" % (time.time() - voor))
    sys.stdout.flush()

duration = time.time() - begin
print('total time %.4f seconds' % duration)
print('total method calls: %d' % (len(funcs) * iters))
avg_normal_msec = 1000.0 * duration / (len(funcs) * iters)
print('avg. time per method call: %.4f msec' % avg_normal_msec)
print('Normal method call is %.2f times faster than Pyro method call.'
      % (avg_pyro_msec / avg_normal_msec))
def randomArray(n):
    # Generate a random array for timing kadai4 (exercise 4).
    arr = arrayUtil.make1d(n)
    for i in range(0, n):
        arr[i] = random.random()
    return arr


def ss_test_a(n):
    return kadai4(randomArray(n), 500)


def ss_test_k(k):
    return kadai4(randomArray(1000), k)


data = [1000, 2000, 4000, 8000, 10000]
result_a = bench.bench(ss_test_a, data)
result_k = bench.bench(ss_test_k, data)
bench.plot(result_a)
bench.plot(result_k)

'''
Notes on the program and its complexity:

This program defines kadai4, which uses the merge and mergesort functions
covered in class: it splits a long array in two, extracts the k smallest
elements from each half, and then extracts the k smallest of the resulting
2k elements, applying divide and conquer. To visualize the running time it
also reuses randomArray and ss_test from class (the class's ss_test is
renamed ss_test_a here). The program is set up so that running it outputs
only the complexity graphs, but
'''
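
# Sketch of the bench.bench(func, data)/bench.plot(result) pair assumed
# above: time func(n) for each input size n and plot size against seconds.
# The return format and the use of matplotlib are assumptions based on how
# the two functions are called.
import time

import matplotlib.pyplot as plt


def bench(func, data):
    results = []
    for n in data:
        start = time.time()
        func(n)
        results.append((n, time.time() - start))
    return results


def plot(results):
    sizes, times = zip(*results)
    plt.plot(sizes, times, marker='o')
    plt.xlabel('input size n')
    plt.ylabel('seconds')
    plt.show()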
from bench import bench

print(bench(10, '''
from random import randint

def bubbles(vs):
    done, n = False, len(vs)
    while not done:
        done, n = True, n-1
        for i in range(n):
            x, y = vs[i], vs[i+1]
            if x > y:
                vs[i], vs[i+1] = y, x
                done = False
    return vs

vals = []
for _ in range(100):
    vals.append(randint(0, 100000))
''', '''
bubbles(vals[:])
'''))
#!/usr/bin/env python
import bench


class TreeNode(object):
    # The node type and tree_insert were missing from this excerpt; both are
    # reconstructed here as a standard unbalanced BST insert, assumed from
    # how tree_contains walks .node/.left/.right.
    def __init__(self, value):
        self.node = value
        self.left = None
        self.right = None


def tree_insert(tree, value):
    if not tree:
        return TreeNode(value)
    if value < tree.node:
        tree.left = tree_insert(tree.left, value)
    elif value > tree.node:
        tree.right = tree_insert(tree.right, value)
    return tree


def tree_contains(tree, value):
    if not tree:
        return False
    elif value == tree.node:
        return True
    elif value < tree.node:
        return tree_contains(tree.left, value)
    else:
        return tree_contains(tree.right, value)


class UnbalancedTree(object):
    def __init__(self):
        self.tree = None

    def insert(self, value):
        self.tree = tree_insert(self.tree, value)

    def contains(self, value):
        return tree_contains(self.tree, value)


if __name__ == "__main__":
    print "unbalancedTree is the main"
    bench.bench(UnbalancedTree)
from bench import bench

print(
    bench(
        10, '''
def fib(n):
    if n < 2:
        return n
    return fib(n-1) + fib(n-2)
''', '''
fib(20)
'''))
def main():
    print("==========================================================")
    print("|                       Magic Maze                       |")
    print("==========================================================")
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', help="The dungeon file to play with.")
    parser.add_argument('-r', type=int,
                        help="Use a square random dungeon of the given size.")
    parser.add_argument('-s', type=int,
                        help="Set the waiting time between moves in milliseconds.")
    parser.add_argument('-l', type=int,
                        help="Set the starting life of the player.")
    parser.add_argument('--pdmIteVal', action='store_true',
                        help="Solve the dungeon with value iteration.")
    parser.add_argument('--pdmGurobi', action='store_true',
                        help="Solve the dungeon with Gurobi.")
    parser.add_argument('--qLearn', action='store_true',
                        help="Solve the dungeon with Q-Learning.")
    parser.add_argument('--infinite', action='store_true',
                        help="Try again and again to win the game.")
    parser.add_argument('--bench', action='store_true',
                        help="Run the benchmarks.")
    args = vars(parser.parse_args())

    if args['bench']:
        bench()
        return

    if args['d']:
        print("Loading dungeon " + args['d'] + ".")
        dungeon = load_dungeon(args['d'])
    elif args['r']:
        try:
            r = max(int(args['r']), 2)
        except Exception as e:
            print("An exception \"" + str(e) + "\" has occurred; "
                  "did you give an integer as the -r argument?")
            sys.exit()
        dungeon = random_dungeon_generation(r, r)
    else:
        print("No dungeon file given, so a random dungeon is being created. "
              "Here are the options if you were looking for them.")
        parser.print_help()
        dungeon = random_dungeon_generation(5, 5)

    try:
        s = max(int(args['s']), 1)
    except Exception as e:
        s = 500
    try:
        maxLife = max(int(args['l']), 1)
    except Exception as e:
        maxLife = 1

    g = Graphics(800, 1000, dungeon, s, maxLife, maxLife)
    if args['pdmIteVal']:
        g.print_footer("Welcome to Magic Maze, you are looking at the moves "
                       "computed by the PDM resolution.")
        if args['infinite']:
            print_PDM(dungeon, g, maxLife, True)
        else:
            print_PDM(dungeon, g, maxLife, False)
    elif args['pdmGurobi']:
        g.print_footer("Welcome to Magic Maze, you are looking at the moves "
                       "computed by the PDM resolution.")
        if args['infinite']:
            print_PDM(dungeon, g, maxLife, True, True)
        else:
            print_PDM(dungeon, g, maxLife, False, True)
    elif args['qLearn']:
        g.print_footer("Welcome to Magic Maze, you are looking at the moves "
                       "computed using Q-Learning.")
        print_qLearning(dungeon, g, maxLife)
    else:
        g.print_footer("Welcome to Magic Maze, use keyboard arrows to play.")
        print_playerInput(dungeon, g, maxLife)
    time.sleep(1)
from microscopes.common.recarray.dataview import numpy_dataview
from microscopes.mixture.model import bind, initialize
# model_definition and bb are used below; these import paths are assumed
# from the datamicroscopes package layout.
from microscopes.mixture.definition import model_definition
from microscopes.models import bb

import numpy as np
import itertools as it
import sys

from bench import bench


def latent(groups, entities_per_group, features, r):
    N = groups * entities_per_group
    defn = model_definition(N, [bb] * features)

    # generate fake data
    Y = np.random.random(size=(N, features)) <= 0.5
    view = numpy_dataview(
        np.array([tuple(y) for y in Y], dtype=[('', bool)] * features))

    # assign entities to their respective groups
    assignment = [[g] * entities_per_group for g in xrange(groups)]
    assignment = list(it.chain.from_iterable(assignment))

    latent = bind(initialize(defn, view, r, assignment=assignment), view)
    latent.create_group(r)  # perftest() doesn't modify group assignments
    return latent


if __name__ == '__main__':
    bench(sys.argv[1:], latent)
from bench import bench

print(bench(10, '', '''
x = 0.0
for _ in range(100000):
    x += 0.1
'''))
from bench import bench

print(bench(10, '''
def fib(n):
    return n if n < 2 else fib(n-1) + fib(n-2)
''', '''
for _ in range(10):
    fib(20)
'''))
from bench import bench

print(
    bench(
        10000, '', '''
list(map(lambda x: x+1, filter(lambda x: x > 48, range(100))))
'''))

print(bench(10000, '', '''
[x+1 for x in range(100) if x > 48]
'''))
funcs[0]()
for i, f in enumerate(funcs, start=1):
    print("call #%d, %d times... " % (i, iters), end="")
    before = time.time()
    for _ in range(iters):
        f()
    print("%.3f" % (time.time() - before))

duration = time.time() - begin
print('total time %.3f seconds' % duration)
amount = len(funcs) * iters
print('total method calls: %d' % amount)
avg_pyro_msec = 1000.0 * duration / amount
print('avg. time per method call: %.3f msec (%d/sec) (serializer: %s)'
      % (avg_pyro_msec, amount / duration, Pyro4.config.SERIALIZER))

print('-------- BENCHMARK LOCAL OBJECT ---------')
obj = bench.bench()
begin = time.time()
iters = 300000
print("warmup...")
for _ in range(iters):
    funcs[0]()
for i, f in enumerate(funcs, start=1):
    print("call #%d, %d times... " % (i, iters), end="")
    before = time.time()
    for _ in range(iters):
        f()
    print("%.3f" % (time.time() - before))

duration = time.time() - begin
print('total time %.3f seconds' % duration)
amount = len(funcs) * iters
print('total method calls: %d' % amount)
#!/usr/bin/env python
import bench


class UnsortedListSet(object):
    def __init__(self):
        self.values = []

    def insert(self, value):
        if not self.contains(value):
            self.values.append(value)

    def contains(self, value):
        for x in self.values:
            if x == value:
                return True
        return False


if __name__ == "__main__":
    print "unsortedlistset is the main"
    bench.bench(UnsortedListSet)
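
# The set examples here (SortedListSet, UnbalancedTree, UnsortedListSet) all
# finish with bench.bench(SomeSetClass). A plausible harness inserts random
# values and then probes membership, printing the elapsed time; the workload
# size and shape are assumptions, not the original module's code.
import random
import time


def bench(set_class):
    s = set_class()
    values = [random.randint(0, 10**6) for _ in range(10000)]
    start = time.time()
    for v in values:
        s.insert(v)
    for v in values:
        assert s.contains(v)
    print("%s: %.3f s" % (set_class.__name__, time.time() - start))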