def test_performance(self):
    # Benchmark the same callable driven by django-filter args vs.
    # rest_framework_filters args, and assert the DRF variant stays within
    # self.threshold times the django-filter time.
    call, args = self.get_callable(*self.django_filter_args())
    df_time = min(repeat(
        lambda: call(*args),
        number=self.iterations,
        repeat=self.repeat,
    ))
    call, args = self.get_callable(*self.rest_framework_filters_args())
    drf_time = min(repeat(
        lambda: call(*args),
        number=self.iterations,
        repeat=self.repeat,
    ))
    # Relative slowdown of the DRF implementation, in percent.
    diff = (drf_time - df_time) / df_time * 100.0
    # NOTE(review): `verbosity` is presumably a module-level global set from
    # the test runner's options -- confirm it is defined at import time.
    if verbosity >= 2:
        print('\n' + '-' * 32)
        print('%s performance' % self.label)
        print('django-filter time:\t%.4fs' % df_time)
        print('drf-filters time:\t%.4fs' % drf_time)
        print('performance diff:\t%+.2f%% ' % diff)
        print('-' * 32)
    self.assertTrue(drf_time < (df_time * self.threshold))
def time_update(function, truncate, imsize, picture, input_im, sigma_r, sigma_s, lw, num_thread=None):
    """Time `function` on float32 copies of the inputs and return the best time.

    Runs timeit.repeat with number=3 and repeat=5; when `num_thread` is given
    it is forwarded as an extra trailing argument to `function`.
    """
    # Cython call parameters: image dimensions and float32 work buffers.
    rows = imsize[0]
    cols = imsize[1]
    out_buf = np.array(picture * 0., np.float32)
    in_buf = np.array(input_im, np.float32)
    call_args = [sigma_s, sigma_r, in_buf, rows, cols, out_buf, lw]
    if num_thread is not None:
        call_args.append(num_thread)
    times = timeit.repeat(lambda: function(*call_args), number=3, repeat=5)
    best = min(times)
    print("{}: {}s".format(str(function), best))
    return best
def run_case(docs, words, word_range):
    """Benchmark both similarity implementations on an identical random doclist.

    The setup seeds `random` identically for both runs, so each implementation
    sees the same generated documents.  Returns a dict of repeat() timings.
    """
    setup_template = (
        'import random;'
        'from {module} import {func} as func;'
        'from __main__ import generate_doclist;'
        'random.seed("tidovsoctavian");'
        'docs_list = generate_doclist({docs}, {words}, {word_range})'
    )

    def time_impl(module, func):
        # One timing run for a single implementation.
        return timeit.repeat(
            "func(docs_list)",
            setup=setup_template.format(
                module=module, func=func,
                docs=docs, words=words, word_range=word_range),
            number=NUMBER, repeat=REPEAT)

    return {
        'octavian': time_impl('set_similarity_octavian', 'similarity'),
        'tido': time_impl('set_similarity_tido', 'print_similar_docs'),
    }
def main():
    # Make every cell of every map transparent and walkable so the FOV and
    # A* benchmarks run on uniform terrain.
    for m in maps:
        for y in range(MAP_HEIGHT):
            for x in range(MAP_WIDTH):
                tcod.map_set_properties(m, x, y, True, True)
    for thread in threads:
        thread.start()
    print('Python %s\n%s\n%s' % (sys.version, platform.platform(), platform.processor()))
    print('\nComputing field-of-view for %i empty %ix%i maps.' % (len(maps), MAP_WIDTH, MAP_HEIGHT))
    single_time = min(timeit.repeat(test_fov_single, number=1))
    print('1 thread: %.2fms' % (single_time * 1000))
    multi_time = min(timeit.repeat(test_fov_threads, number=1))
    print('%i threads: %.2fms' % (THREADS, multi_time * 1000))
    # Efficiency: 100% means N threads gave an exact N-fold speedup.
    print('%.2f%% efficiency' % (single_time / (multi_time * THREADS) * 100))
    print('\nComputing AStar from corner to corner %i times on seperate empty'
          ' %ix%i maps.' % (PATH_NUMBER, MAP_WIDTH, MAP_HEIGHT))
    single_time = min(timeit.repeat(test_astar_single, number=1))
    print('1 thread: %.2fms' % (single_time * 1000))
    multi_time = min(timeit.repeat(test_astar_threads, number=1))
    print('%i threads: %.2fms' % (THREADS, multi_time * 1000))
    print('%.2f%% efficiency' % (single_time / (multi_time * THREADS) * 100))
def run_profile():
    # Python 2 code.  Times a whiskeycache.find() query against 10000
    # freshly-created Node documents.
    print 'oh yeah'
    # Setup executed once by timeit: define a Node model and populate the cache.
    setup='''
from whiskeynode import WhiskeyNode
from whiskeynode import whiskeycache
from whiskeynode.db import db
default_sort = [('_id', -1)]
class Node(WhiskeyNode):
    COLLECTION_NAME = 'test_node'
    COLLECTION = db[COLLECTION_NAME]
    FIELDS = {
        'myVar':int,
    }
    def __init__(self, *args, **kwargs):
        WhiskeyNode.__init__(self, *args, **kwargs)
nodes = [Node({'myVar':i}) for i in range(10000)]
'''
    # The statement being timed: a filtered, sorted cache lookup.
    query='''
whiskeycache.find(Node, {"myVar":{"$gt":500}}, default_sort)
'''
    N = 1  # number of executions per repeat
    R = 3  # number of repeats
    print timeit.repeat(query, setup=setup, repeat=R, number=N)
def find_breaking_point(f1, f2, input_generator, start=1, step=1, limit=1000000,
                        trial_count=1000, repeat_count=3):
    """Find the input size n0 for which f2(n0) is faster than f1(n0).

    - f1, f2 - functions to test.
    - input_generator - function that receives the current input size and
      returns a tuple of (list of positional args, dict of keyword args).
    - start - initial input data size.
    - step - iteration step.
    - limit - maximum size of input data.
    - trial_count - count of executions of f1/f2 on each iteration.
    - repeat_count - repeat trials several times and use the average.

    Returns n0 - the first tested size at which f2 is faster than f1 on
    average, or None if the limit is reached first.
    """
    # BUG FIX: the original iterated range(start, limit+1) and silently
    # ignored the documented `step` parameter.
    for n in range(start, limit + 1, step):
        args, kwargs = input_generator(n)
        # Average run time of the first function.
        f1_results = timeit.repeat(lambda: f1(*args, **kwargs),
                                   repeat=repeat_count, number=trial_count)
        f1_avg = sum(f1_results) / len(f1_results)
        # Average run time of the second function.
        f2_results = timeit.repeat(lambda: f2(*args, **kwargs),
                                   repeat=repeat_count, number=trial_count)
        f2_avg = sum(f2_results) / len(f2_results)
        if f2_avg < f1_avg:
            return n
    return None
def main():
    '''Parse CLI args and time the selected n-queens permutation functions.

    argv: n [print_sol [repeats [function indexes...]]]
    '''
    init()
    if len(argv) > 1:
        n = int(argv[1])
    else:
        print('nchess.py, usage:\nn print repeat functions')
        return
    if len(argv) > 2:
        print_sol = bool(int(argv[2]))
    functions = [perm_all, perm_op1, perm_op2, perm_op3, perm_op4, perm_op5]
    if len(argv) > 3:
        repeats = int(argv[3])
    else:
        repeats = 100
    if len(argv) > 4:
        for func in argv[4:]:
            print()
            # Extract the bare function name from its repr for the timeit stmt.
            funcstr = str(functions[int(func)]).split(' ')[1]
            print(funcstr)
            if print_sol:
                print(min(timeit.repeat('print({}({}))'.format(funcstr, n),
                                        setup='from __main__ import '+funcstr,
                                        repeat=repeats, number=1)))
            else:
                print(min(timeit.repeat('{}({})'.format(funcstr, n),
                                        setup='from __main__ import '+funcstr,
                                        repeat=repeats, number=1)))
    else:
        # BUG FIX: the original printed a generator object
        # (`print(function(n) for function in functions[1:])`); actually
        # evaluate and print each function's result instead.
        for function in functions[1:]:
            print(function(n))
def time_big_inputs(self):
    # Time merge sort vs counting sort over a range of input sizes
    # (max element grows with size), then plot both curves.
    n_elems_range = np.linspace(1, 2**self.max_exponent, num=self.num_test_points, dtype=int).tolist()
    times_merge = []
    times_counting = []
    sort_case = BigInputsSortCase('merge')
    for i, n_elems in enumerate(n_elems_range):
        print(i)
        sort_case.setup(n_elems=n_elems, max_elem=n_elems)
        print('max:' + str(max(sort_case.test_input)))
        # Best of num_repeats single runs, converted to milliseconds.
        elapsed_time = min(timeit.repeat(sort_case.sort, number=1, repeat=self.num_repeats))*1000
        times_merge.append(elapsed_time)
    sort_case = BigInputsSortCase('counting')
    for i, n_elems in enumerate(n_elems_range):
        print(i)
        sort_case.setup(n_elems=n_elems, max_elem=n_elems)
        elapsed_time = min(timeit.repeat(sort_case.sort, number=1, repeat=self.num_repeats))*1000
        times_counting.append(elapsed_time)
    # plot both
    plt.plot(n_elems_range, times_merge, color='red', label='Merge sort', linestyle='-', marker='o')
    plt.plot(n_elems_range, times_counting, color='blue', label='Counting sort', linestyle='-', marker='o')
    plt.title('Big Inputs case')
    plt.xlabel('size (length) of the input')
    plt.ylabel('ms.')
    plt.legend(loc='upper left', frameon=True)
    plt.show()
def main():
    # Sanity-check the prime sum, then time the old and new implementations.
    print("Calling on 10: " + str(sum_of_primes(10)))
    print("Timing new Prime Method:")
    print("Old Method")
    print(timeit.repeat("sum_of_primes(2000000)", "from __main__ import sum_of_primes", number=1))
    print("New Method")
    print(timeit.repeat("better_sum_of_primes(2000000)", "from __main__ import better_sum_of_primes", number=1))
def time_already_sorted(self):
    # For each input size, measure sort time on already-sorted input vs a
    # random permutation of the same size; plot the per-algorithm difference.
    max_value_range = np.linspace(1, 2**self.max_exponent, num=self.num_test_points, dtype=int).tolist()
    times_diff_merge = []
    times_diff_counting = []
    for n_elems in max_value_range:
        sort_case = AlreadySortedSortCase('merge')
        sort_case.setup(n_elems=n_elems)
        elapsed_time_sorted = min(timeit.repeat(sort_case.sort, number=1, repeat=self.num_repeats))*1000
        sort_case = ManualSortCases('merge')
        sort_case.setup(np.random.permutation(n_elems).tolist())
        elapsed_time_permuted = min(timeit.repeat(sort_case.sort, number=1, repeat=self.num_repeats))*1000
        # Positive diff => permuted input was slower than sorted input.
        diff_merge = elapsed_time_permuted - elapsed_time_sorted
        print(diff_merge)
        times_diff_merge.append(diff_merge)
    for n_elems in max_value_range:
        sort_case = AlreadySortedSortCase('counting')
        sort_case.setup(n_elems=n_elems)
        elapsed_time_sorted = min(timeit.repeat(sort_case.sort, number=1, repeat=self.num_repeats))*1000
        sort_case = ManualSortCases('counting')
        sort_case.setup(np.random.permutation(n_elems).tolist())
        elapsed_time_permuted = min(timeit.repeat(sort_case.sort, number=1, repeat=self.num_repeats))*1000
        diff_count = elapsed_time_permuted - elapsed_time_sorted
        print(diff_count)
        times_diff_counting.append(diff_count)
    # plot both
    plt.plot(max_value_range, times_diff_merge, color='red', label='Merge sort', linestyle='-', marker='o')
    plt.plot(max_value_range, times_diff_counting, color='blue', label='Counting sort', linestyle='-', marker='o')
    plt.title('Already Sorted case')
    plt.xlabel('size (length) of the input')
    plt.ylabel(r'$RT_{PERMUTED} - RT_{SORTED}$ [ms.]')
    plt.legend(loc='upper left', frameon=True)
    plt.show()
def performance(name, size, loops=100):
    # Time the `atm` function from the `isa` module found under ./<name>,
    # for each element count in `size`; returns best ms-per-loop values.
    libpath = os.path.join(os.getcwd(), name)
    sys.path.append(libpath)
    # NOTE(review): `atm` is never bound here, so the NameError branch always
    # fires and (re)imports `isa` from the freshly-appended path -- confirm
    # this reload trick is intentional.
    try:
        atm
    except NameError:
        import isa
        imp.reload(isa)
    times = []
    for element in size:
        element = int(element)
        if element == 1:
            # Scalar call.
            time = repeat('atm(0.)', setup='from isa import atm',
                          number=loops, repeat=3)
        elif element > 1:
            # Vectorized call over `element` altitudes.
            time = repeat('atm(h)',
                          setup='from isa import atm\n'
                                'from numpy import linspace\n'
                                'h = linspace(0., 11000., {})'
                                .format(element),
                          number=loops, repeat=3)
        # Best run converted to milliseconds per single loop.
        time = 1e3 * min(time) / loops
        times.append(time)
    sys.path.remove(libpath)
    return times
def main(conf):
    """Run timed benchmark"""
    # Mark counts to test for reads and writes (powers of two, plus 1 and 2).
    read_sequence = [1, 2, 16, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 262144]
    write_sequence = [1, 2, 16, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 262144]
    read_results = []
    write_results = []
    prepare_files(conf)
    # Each result is (count, best time) -- min() of timeit.repeat.
    for i in read_sequence:
        read_results.append((i, min(
            timeit.repeat("read_mark(%s, filehandle)" % i,
                          setup = conf['setup_read'],
                          repeat=conf['repeat'],
                          number=conf['number']))))
    for i in write_sequence:
        write_results.append((i, min(
            timeit.repeat("write_mark(%s, filehandle, data)" % i,
                          setup = conf['setup_write'],
                          repeat=conf['repeat'],
                          number=conf['number']))))
    out = pprint.PrettyPrinter()
    out.pprint(read_results)
    out.pprint(write_results)
def _measure_performance():
    # Python 2 code.  Compare numpy, numba and C implementations of the
    # squared-difference template match over 100 random templates.
    import timeit
    _sqdiff_numba = _make_sqdiff_numba()
    print "All times in ms numpy\tnumba"
    print "type     \tnumpy\tnumba\tC\tspeedup\tspeedup\tsize\talignment"
    for _ in range(100):
        frame_cropped, template, template_transparent = _random_template()
        for l, t in [("template ", template),
                     ("with mask", template_transparent),
                     ("unmasked ", template_transparent[:, :, :3])]:
            # pylint: disable=cell-var-from-loop
            # Best of 3 repeats of 10 runs, divided down to per-call seconds.
            np_time = min(timeit.repeat(
                lambda: _sqdiff_numpy(t, frame_cropped),
                repeat=3, number=10)) / 10
            c_time = min(timeit.repeat(
                lambda: _sqdiff_c(t, frame_cropped),
                repeat=3, number=10)) / 10
            if _sqdiff_numba:
                numba_time = min(timeit.repeat(
                    lambda: _sqdiff_numba(t, frame_cropped),
                    repeat=3, number=10)) / 10
            else:
                # numba unavailable: report NaN instead of a time.
                numba_time = float('nan')
            print "%s\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%i x %i \t%s" % (
                l, np_time * 1000, numba_time * 1000, c_time * 1000,
                np_time / c_time, numba_time / c_time,
                frame_cropped.shape[1], frame_cropped.shape[0],
                frame_cropped.ctypes.data % 8)
def do_it(cmd, data_str, num_threads, globals, number, repeat, divisor):
    """Time `cmd` directly (one thread) or through the _run_x driver, then
    hand the timings to print_time for reporting."""
    if num_threads == 1:
        stmt = '%s(%s, core)' % (cmd, data_str)
    else:
        stmt = '_run_x(%s, %s, %s, core=core)' % (cmd, data_str, num_threads)
    times = timeit.repeat(stmt, globals=globals, number=number, repeat=repeat)
    print_time(cmd, times, divisor)
def measuringExecutionTimes(self):
    # Python 2 demo of timeit.repeat.  NOTE(review): the timed statement only
    # *creates* a lambda on each loop iteration, it never calls it, and the
    # local f() below is unused -- confirm this is the intended demo.
    print "---- measuringExecutionTimes() ----"

    def f(x):
        return x * x

    import timeit
    print timeit.repeat("for x in range(100): lambda x: x*10", "", number=100000)
def time_fit_predict(clf, dfx, dfy, var='TF', num=10, rp=3): '''time fit and predict with classifier clf on training set dfx, dfy using num loops and rp repeats''' # print("time_fit_predict: var", var) # dfy['TF'] has two states (0, 1) def fit_clf(): do_fit(clf, dfx, dfy['TF']) # should run predict on test not train data def predict_clf(): do_predict(clf, dfx, dfy['TF']) # dfy['Y'] has six states (1-6) def fit_clf_multi(): do_fit(clf, dfx, dfy['Y']) def predict_clf_multi(): do_predict(clf, dfx, dfy['Y']) if var=='Y': tfit = min(timeit.repeat(fit_clf_multi, repeat=rp, number=num)) tpred = min(timeit.repeat(predict_clf_multi, repeat=rp, number=num)) else: tfit = min(timeit.repeat(fit_clf, repeat=rp, number=num)) tpred = min(timeit.repeat(predict_clf, repeat=rp, number=num)) tfit = tfit * 1e3 / num tpred = tpred * 1e3 / num return tfit, tpred
def plot_case(n_floats=10, n_ints=0, n_strs=0, float_format=None, str_val=None):
    # Benchmark io.ascii (serial and parallel fast-C readers) against pandas
    # on generated tables of increasing size, save a log-log plot, and append
    # (image file, summary text) to the global output list.
    global table1, output_text
    n_rows = (10000, 20000, 50000, 100000, 200000)  # include 200000 for publish run
    numbers = (1, 1, 1, 1, 1)
    repeats = (3, 2, 1, 1, 1)
    times_fast = []
    times_fast_parallel = []
    times_pandas = []
    for n_row, number, repeat in zip(n_rows, numbers, repeats):
        # Fresh temp file per size; make_table fills it with random data.
        table1 = NamedTemporaryFile()
        make_table(table1, n_row, n_floats, n_ints, n_strs, float_format, str_val)
        t = timeit.repeat("ascii.read(table1.name, format='basic', guess=False, use_fast_converter=True)",
                          setup='from __main__ import ascii, table1', number=number, repeat=repeat)
        times_fast.append(min(t) / number)
        t = timeit.repeat("ascii.read(table1.name, format='basic', guess=False, parallel=True, use_fast_converter=True)",
                          setup='from __main__ import ascii, table1', number=number, repeat=repeat)
        times_fast_parallel.append(min(t) / number)
        t = timeit.repeat("pandas.read_csv(table1.name, sep=' ', header=0)",
                          setup='from __main__ import table1, pandas', number=number, repeat=repeat)
        times_pandas.append(min(t) / number)
    plt.loglog(n_rows, times_fast, '-or', label='io.ascii Fast-c')
    plt.loglog(n_rows, times_fast_parallel, '-og', label='io.ascii Parallel Fast-c')
    plt.loglog(n_rows, times_pandas, '-oc', label='Pandas')
    plt.grid()
    plt.legend(loc='best')
    plt.title('n_floats={} n_ints={} n_strs={} float_format={} str_val={}'.format(
        n_floats, n_ints, n_strs, float_format, str_val))
    plt.xlabel('Number of rows')
    plt.ylabel('Time (sec)')
    # Image name is derived from how many results have been emitted so far.
    img_file = 'graph{}.png'.format(len(output_text) + 1)
    plt.savefig(img_file)
    plt.clf()
    text = 'Pandas to io.ascii Fast-C speed ratio: {:.2f} : 1<br/>'.format(times_fast[-1] / times_pandas[-1])
    text += 'io.ascii parallel to Pandas speed ratio: {:.2f} : 1'.format(times_pandas[-1] / times_fast_parallel[-1])
    output_text.append((img_file, text))
def warm_up():
    """Exercise the protobuf benchmark briefly so the PyPy JIT is warm
    before real measurements start."""
    log.info('Warming up the Pypy JIT compiler...')
    timeit.repeat(stmt=protobuf, setup=protobuf_setup, number=10 ** 4, repeat=3)
def reverseString3(originalString):
    # Python 2 code.  NOTE(review): `originalString` is never used -- this
    # only benchmarks four ways of reversing the literal 'world'.
    print min(timeit.repeat("''.join(reversed('world'))"))
    # 2.2613844704083021
    print min(timeit.repeat("'world'[::-1]"))
    # 0.28049658041891234
    print min(timeit.repeat("start=stop=None; step=-1; 'world'[start:stop:step]"))
    # 0.37622163503510819
    print min(timeit.repeat("start=stop=None; step=-1; reverse_slice = slice(start, stop, step); 'world'[reverse_slice]"))
def main():
    # The brute-force timing is left disabled -- presumably too slow to run
    # routinely.
    #print("Timing bruteforce Method:")
    #print(timeit.repeat("superNaive()", "from __main__ import superNaive", number=1))
    print("timing smarterNaive Method:")
    print(timeit.repeat("smarterNaive()", "from __main__ import smarterNaive", number=1))
    print("timing number Theory Method:")
    print(timeit.repeat("smartest()", "from __main__ import smartest", number=1))
def benchmark(): log.info('Calculating number of loops...') # In order to make the overhead insignificant run the benchmark at # least 100*overhead times. overhead = timeit.repeat(stmt='pass') target = min(overhead) * 100 # Find the number of loops by trying successive powers of 10 until the total # time is >= overhead. loops = 0 sample = 0 while sample < target: loops += 1 # Use the slowest of the two code samples, protobuf sample = timeit.timeit( stmt=protobuf, setup=protobuf_setup, number=10**loops, ) loops = 10**loops if loops < 10000: loops = 10000 repeat = 3 log.info('Running protobuf benchmark...') times = timeit.repeat( stmt=protobuf, setup=protobuf_setup, repeat=repeat, number=loops, ) buftime = min(times) log.info('Running protolite benchmark...') times = timeit.repeat( stmt=protolite, setup=protolite_setup, repeat=repeat, number=loops, ) litetime = min(times) bufmsg = dict([ ('loops', loops), ('repeat', repeat), ('secs', buftime), ('speed', litetime/buftime), ]) litemsg = dict([ ('loops', loops), ('repeat', repeat), ('secs', litetime), ('speed', buftime/litetime), ]) results = dict([ ('protobuf', bufmsg), ('protolite', litemsg), ]) log.info('Results:') print json.dumps(results, indent=2)
def use_fullpage(self, address_space):
    """Calibrate the scanner to ensure fastest speed"""
    # Time 100 full-page scans against 100 partial-page scans; True means
    # full-page scanning is the faster mode.
    def best_of(full):
        runs = timeit.repeat(
            lambda: list(self.scan_page(address_space, 0, full)),
            number = 100)
        return min(runs)

    return best_of(True) < best_of(False)
def test_performance_overhead_no_override(self):
    # Python 2 code.  Compare instantiation cost of the manual-super class
    # against the cooperative-super class when nothing is overridden.
    import timeit
    t1 = min(timeit.repeat(_SuperSimpleTestDeriv, number=self.test_number))
    t2 = min(timeit.repeat(_SuperCoopSimpleTestDeriv, number=self.test_number))
    print
    print "No override -- "
    print " Manual: ", t1
    print " Coop: ", t2
    print " Ratio: ", t2/t1
def test_performance_overhead_with_params(self):
    # Python 2 code.  Same comparison as the no-override case, but for the
    # parameterized class variants.
    import timeit
    t1 = min(timeit.repeat(_TestDeriv, number=self.test_number))
    t2 = min(timeit.repeat(_CoopTestDeriv, number=self.test_number))
    print
    print "Params -- "
    print " Manual: ", t1
    print " Coop: ", t2
    print " Ratio: ", t2/t1
def run():
    # Benchmark the pure-Python and C lru_cache variants (typed and untyped)
    # across several argument shapes, printing relative speedups.
    print("Test Suite 1 : ", end="\n\n")
    print("Primarily tests cost of function call, hashing and cache hits.")
    print("Benchmark script based on")
    print("    http://bugs.python.org/file28400/lru_cache_bench.py", end="\n\n")
    _print_single_speedup(init=True)
    results = []
    # Positional and keyword argument patterns of increasing arity.
    args = ["i", '"spam", i', '"spam", "spam", i',
            "a=i", 'a="spam", b=i', 'a="spam", b="spam", c=i']
    for a in args:
        for f in ["_py_untyped", "_c_untyped", "_py_typed", "_c_typed"]:
            s = "%s(%s)" % (f, a)
            t = min(
                timeit.repeat(
                    """
for i in range(100):
    {}
""".format(
                        s
                    ),
                    setup="from fastcache.benchmark import %s" % f,
                    repeat=10,
                    number=1000,
                )
            )
            results.append([t, s])
        # Report the four variants just timed for this argument pattern.
        _print_single_speedup(results[-4:])
    _print_speedup(results)
    print("\n\nTest Suite 2 :", end="\n\n")
    print("Tests millions of misses and millions of hits to quantify")
    print("cache behavior when cache is full.", end="\n\n")
    setup = "from fastcache.benchmark import {}\n" + "from fastcache.benchmark import _arg_gen"
    results = []
    for f in ["_py_untyped", "_c_untyped", "_py_typed", "_c_typed"]:
        s = '%s(i, j, a="spammy")' % f
        t = min(
            timeit.repeat(
                """
for i, j in _arg_gen():
    %s
"""
                % s,
                setup=setup.format(f),
                repeat=3,
                number=100,
            )
        )
        results.append([t, s])
    _print_single_speedup(init=True)
    _print_single_speedup(results)
def timethese(n=1):
    # Python 2 code.  Compare recursive vs iterative fibonacci through the
    # shared test() harness; 'Difference' is the ratio of best times.
    import timeit
    setup = 'from __main__ import test, fibrecur, fibiter'
    t1 = timeit.repeat('test(fibrecur)', setup, number=n)
    t2 = timeit.repeat('test(fibiter)', setup, number=n)
    print 'recursive', t1
    print 'iterative', t2
    print 'Difference', min(t1) / min(t2)
def test_big_object_performance(self):
    # Python 2 code.  Assert dpark's dumps is no more than 2.5x slower than
    # stdlib pickle on a 10000-entry dict with tuple keys.
    # NOTE(review): timeit docs recommend min() over repeat() results; using
    # max() here makes the comparison noisier -- confirm intent.
    t1 = max(timeit.repeat('dumps(d)',
                           'from dpark.serialize import dumps;'
                           'd = {(str(i),):i for i in xrange(10000)}',
                           repeat=3, number=1))
    t2 = max(timeit.repeat('dumps(d, -1)',
                           'from pickle import dumps;'
                           'd = {(str(i),):i for i in xrange(10000)}',
                           repeat=3, number=1))
    assert t1 < t2 * 2.5
def test():
    """For every registered benchmark, report its best time with the
    gizmo.gadget attribute present and with it deleted."""
    setup = 'from __main__ import gizmo'
    for test_key in test_keys:
        bench = globals()['test_' + test_key]
        # Time with the attribute present...
        t_present = min(timeit.repeat(bench, setup=setup))
        # ...then with it absent, restoring it afterwards.
        del gizmo.gadget
        t_absent = min(timeit.repeat(bench, setup=setup))
        gizmo.gadget = True
        print('{:7} {:.3f} {:.3f}'.format(test_key, t_present, t_absent))
def timethese():
    # Python 2 code.  Compare shutil-style copy vs urlretrieve through the
    # shared test() harness over 50 runs each.
    import timeit
    setup = 'from __main__ import test, shcopy, urlretr'
    n = 50
    t1 = timeit.repeat('test(shcopy)', setup, number=n)
    t2 = timeit.repeat('test(urlretr)', setup, number=n)
    print 'shcopy', t1
    print 'urlretr', t2
    print 'Difference', min(t1) / min(t2)
def get_exec_time(line, times, counts):
    # Python 2 code.  Time str.endswith vs slicing on `line`.
    # NOTE(review): after sorting, index [-1] reports the *slowest* run of
    # each -- confirm that is intended rather than the usual fastest run.
    endswith_exec_time_list = timeit.repeat('line_endswith(line)',
                                            'from __main__ import line_endswith,line',
                                            repeat=times, number=counts)
    endswith_exec_time_list.sort()
    slice_exec_time_list = timeit.repeat('line_slice(line)',
                                         'from __main__ import line_slice,line',
                                         repeat=times, number=counts)
    slice_exec_time_list.sort()
    print "endswith: %s(s)" % endswith_exec_time_list[-1]
    print "slice : %s(s)" % slice_exec_time_list[-1]
def do_timing(s):
    """Print the name of ttls[s], then its timings for 10000 ttl(s) calls."""
    print(ttls[s].__name__)
    timings = timeit.repeat(f"ttl({s})", "from __main__ import ttl", number=10000)
    print(timings)
} # %% demo_process = Process( func=add_me, map_inputs=lambda config, state: { "x": state['foo']['bar'] + 1, "y": config['hello']['val'] }, map_outputs=lambda result, prev_state: { **prev_state, **{"foo": {"bar": result}}, } ) # %% state_out = run_process(state, demo_process, config) print(state_out) # %% # Compare timeings from timeit import repeat t1 = min(repeat(lambda: run_process(state, demo_process, config))) print(t1) # 0.6141055879998021
def bench_func(func, *args, **kwargs):
    """Time single calls of func(*args, **kwargs), BENCH_REPEATS times,
    returning the list of per-call timings."""
    return repeat(lambda: func(*args, **kwargs), number=1, repeat=BENCH_REPEATS)
# Verify the optimized correlation result against the reference, then time it.
# NOTE(review): C and Z come from earlier in the file -- not visible here.
if np.allclose(C, Z):
    print("Test passed")
else:
    print("Test failed")

if __name__ == '__main__':
    import timeit
    import sys
    # system information
    print("Python: " + sys.version)
    print("Numpy : " + np.version.version)
    np.__config__.show()
    # setup snippet
    timingSetup = """
import numpy as np
from __main__ import AlmightyCorrcoefEinsumOptimized
O = np.random.rand(int(1E5),int(1E3))
P = np.random.rand(int(1E5), 256)
"""
    # timing: best of 3 single runs on the large random inputs.
    print(
        min(
            timeit.repeat("AlmightyCorrcoefEinsumOptimized(O, P)",
                          setup=timingSetup,
                          repeat=3,
                          number=1)))
import time
import timeit
import text_example
import memory_profiler
import dawg

# Python 2 script: measure RAM and build time for a DAWG of words, then the
# summed cost of 10000 membership lookups.
if __name__ == "__main__":
    print "RAM at start {:0.1f}MiB".format(memory_profiler.memory_usage()[0])
    # avoid building a temporary list of words in Python, store directly in the
    # DAWG
    t1 = time.time()
    words_dawg = dawg.DAWG(text_example.readers)
    t2 = time.time()
    print "RAM after creating dawg {:0.1f}MiB, took {:0.1f}s".format(
        memory_profiler.memory_usage()[0], t2 - t1)
    assert u'Zwiebel' in words_dawg
    # Total (not best) time over 10000 single lookups.
    time_cost = sum(
        timeit.repeat(stmt="u'Zwiebel' in words_dawg",
                      setup="from __main__ import words_dawg",
                      number=1,
                      repeat=10000))
    print "Summed time to lookup word {:0.4f}s".format(time_cost)
def fact_for(n):
    # Error if n < 0 or not integral.
    if n < 0 or np.trunc(n) != n:
        raise Exception('The factorial is defined only for positive integers')
    factorial = 1
    for factor in range(1, n + 1):
        factorial = factor * factorial
    return factorial

# Compare the recursive implementation (fact_rec, defined elsewhere in this
# file) against the iterative one above, 10 calls of 500! each.
t_rec = timeit.repeat("fact_rec(500)", "from __main__ import fact_rec", number=10)
t_for = timeit.repeat("fact_for(500)", "from __main__ import fact_for", number=10)
print(t_rec)
print(t_for)
print(
    "Average factor of calculation time between recursive and iterative approach: "
)
print(np.average(t_rec) / np.average(t_for))
# Show some large factorials; float(170!) is near the float64 overflow limit.
print([str(n) + "! = " + str(fact_for(n)) for n in range(190, 201)])
print("float(170!) = " + str(float(fact_for(170))))
)) dpctl.set_default_queue("opencl", "gpu", 0) print("SYCL({}) result: {}".format( dpctl.get_current_queue().get_sycl_device().get_device_name(), sb.columnwise_total(X), )) import timeit print("Times for 'opencl:cpu:0'") print( timeit.repeat( stmt="sb.columnwise_total(X)", setup='dpctl.set_default_queue("opencl", "cpu", 0); ' "sb.columnwise_total(X)", # ensure JIT compilation is not counted number=100, globals=globals(), )) print("Times for 'opencl:gpu:0'") print( timeit.repeat( stmt="sb.columnwise_total(X)", setup= 'dpctl.set_default_queue("opencl", "gpu", 0); sb.columnwise_total(X)', number=100, globals=globals(), )) print("Times for NumPy")
from math import sin, cos, radians
import timeit


def bench():
    """Multiply sin^2 + cos^2 (mathematically 1.0) for every whole degree,
    999 passes over the circle; returns the accumulated product."""
    product = 1.0
    for _ in range(1, 1000):
        for degree in range(1, 360):
            theta = radians(degree)
            product *= sin(theta) ** 2 + cos(theta) ** 2
    return product


if __name__ == '__main__':
    # Ten repeats of ten runs; the score is derived from the fastest repeat.
    runs = timeit.repeat(stmt=bench,
                         setup='from math import sin, cos, radians',
                         number=10, repeat=10)
    runs = sorted(runs)
    final_score = ((3 - runs[0]) * 1 / 1.8) * 100
    print(final_score)
def clock(label, cmd):
    """Time `cmd` under the module-level SETUP and print `label` followed by
    each repeat's time formatted to three decimals."""
    results = timeit.repeat(cmd, setup=SETUP, number=TIMES)
    print(label, *('{:.3f}'.format(t) for t in results))
from timeit import repeat

# Build a comma-separated string of random ints via repeated += concatenation.
str_nums1 = """
numbers = str(random.randint(1,100))
for i in range(1000):
    num = random.randint(1,100)
    numbers += ', ' + str(num)"""

# Same result built with a list comprehension and one ', '.join().
str_nums2 = """
numbers = [str(random.randint(1,100)) for i in range(1,1000)]
numbers = ', '.join(numbers)"""

tds1 = repeat(str_nums1, number=1000, repeat=4, setup='import random')
tds2 = repeat(str_nums2, number=1000, repeat=4, setup='import random')
print("Results from using repeat()")
print(tds1, tds2, sep="\n")
print('-' * 70)
# Report each approach's total time as a percentage of the other's.
print('str_nums2 compared to str_nums1:')
print('{:.2%}'.format(sum(tds2) / sum(tds1)))
print('-' * 70)
print('str_nums1 compared to str_nums2:')
print('{:.2%}'.format(sum(tds1) / sum(tds2)))
# Three more iteration styles for the shared benchmark below.
def mapCall():
    return list(map(abs, replist))

def genExpr():
    return list(abs(x) for x in replist)

def genFunc():
    def gen():
        for x in replist:
            yield abs(x)
    return list(gen())

print(sys.version)
# NOTE(review): forLoop, listComp, replist and `timer` come from earlier in
# this file -- not visible here.
for test in (forLoop, listComp, mapCall, genExpr, genFunc):
    (bestof, (total, result)) = timer.bestoftotal(5, 1000, test)
    print('%-9s: %.5f => [%s...%s]' %
          (test.__name__, bestof, result[0], result[-1]))

# timeit module
timeit.repeat()
## combined with min() gives the best time of run
min(timeit.repeat(stmt="[x**2 for x in range(1000)]", number=1000, repeat=5))

import chessboard
chessboard.chessboard()
# NOTE(review): this passes chessboard2's *return value*, not a callable or
# statement string, to timeit.repeat -- confirm that is intended.
min(timeit.repeat(chessboard.chessboard2(1000), number=1000, repeat=5))
import matplotlib.pyplot as plt plt.switch_backend('Agg') import numpy as np import timeit num_repeat = 10 stmt = "train(model)" setup = "model = ModelParallelResNet50()" # globals arg is only available in Python 3. In Python 2, use the following # import __builtin__ # __builtin__.__dict__.update(locals()) mp_run_times = timeit.repeat(stmt, setup, number=1, repeat=num_repeat, globals=globals()) mp_mean, mp_std = np.mean(mp_run_times), np.std(mp_run_times) setup = "import torchvision.models as models;" + \ "model = models.resnet50(num_classes=num_classes).to('cuda:0')" rn_run_times = timeit.repeat(stmt, setup, number=1, repeat=num_repeat, globals=globals()) rn_mean, rn_std = np.mean(rn_run_times), np.std(rn_run_times) def plot(means, stds, labels, fig_name):
from __future__ import absolute_import, print_function
import timeit
import integrate0, integrate1, integrate2

# Benchmark each integrate module's integrate_f; speedups are reported
# relative to the first (pure-Python) module's time.
number = 10
py_time = None
for m in ('integrate0', 'integrate1', 'integrate2'):
    print(m)
    t = min(timeit.repeat("integrate_f(0.0, 10.0, 100000)",
                          "from %s import integrate_f" % m,
                          number=number))
    if py_time is None:
        # First module establishes the baseline.
        py_time = t
    print("   ", t / number, "s")
    print("   ", py_time / t)
def print_time(op, expr):
    """Evaluate `expr` five times (one execution each) and report the fastest
    run for operation `op` using the module RESULT_FORMAT template."""
    timings = timeit.repeat(expr, globals=globals(), number=1, repeat=5)
    print(RESULT_FORMAT.format(op=op, time=min(timings)))
def _test_performance(self):
    # Python 2 code.  Time 100 full runs of the recommendation precompute
    # task and print the raw repeat() list.
    stmt = """recommends_precompute()"""
    setup = """from recommends.tasks import recommends_precompute"""
    print "timing..."
    times = timeit.repeat(stmt, setup, number=100)
    print times
import timeit

s = "abcdefghijklmnopqrstuvwxyz" * 10
# NOTE(review): reverse_string1 is not defined in this chunk, and
# reverse_string2/reverse_string3 are defined *below* these calls, so running
# this top-to-bottom would raise NameError -- confirm the intended ordering.
timeit.repeat(lambda: reverse_string1(s))
timeit.repeat(lambda: reverse_string2(s))
timeit.repeat(lambda: reverse_string3(s))

def reverse_string3(s):
    # Swap characters in place from both ends toward the middle.
    chars = list(s)
    for i in range(len(s) // 2):
        tmp = chars[i]
        chars[i] = chars[len(s) - i - 1]
        chars[len(s) - i - 1] = tmp
    return ''.join(chars)

data = reverse_string3("TURBO")
# print(data)
# for elem in reversed("TURBO"):
#     print(elem)
text = "TURBO"[::-1]
# print(text)

def reverse_string2(s):
    # Built-in reversed() iterator joined back into a string.
    return "".join(reversed(s))
i = m - 1 k = m - 1 while i < n: if text[i] == pattern[k]: if k == 0: return i else: i -= 1 k -= 1 else: j = last.get(text[i], -1) i += m - min(k, j + 1) k = m - 1 return -1 if __name__ == '__main__': print brute_force('Hello World', 'lo Wo') print brute_force('Hello World', 'lo wo') print boyer_moore('Hello World', 'lo Wo') print boyer_moore('Hello World', 'lo wo') print repeat('brute_force(\'Hello World\', \'lo Wo\')', 'from algo.pattern_matching import brute_force', repeat=3) print repeat('boyer_moore(\'Hello World\', \'lo Wo\')', 'from algo.pattern_matching import boyer_moore', repeat=3)
from timeit import repeat

# Three ways of dropping empty strings from a list, each timed for
# 100000 executions x 3 repeats: filter(None, ...), a list comprehension,
# and repeated list.remove('').
print(
    repeat(
        "new_list=list(filter(None, your_list))",
        'your_list= 100*["a", "b", "", "", "c", "", "d", "e", "f", "", "g"]',
        repeat=3,
        number=100000))
print(
    repeat(
        "your_list=[x for x in your_list if x != '']",
        'your_list= 100*["a", "b", "", "", "c", "", "d", "e", "f", "", "g"]',
        repeat=3,
        number=100000))
# Note: remove('') mutates the list, so later executions in the same repeat
# operate on an already-emptied list.
print(
    repeat(
        "while '' in your_list: your_list.remove('')",
        'your_list= 100*["a", "b", "", "", "c", "", "d", "e", "f", "", "g"]',
        repeat=3,
        number=100000))
'''
result:
[1.26160959, 1.26600539, 1.2595593159999998]
[2.6536732560000003, 2.6442194679999993, 2.663753957999999]
[0.6465177769999997, 0.6435874330000004, 0.6530912820000001]
'''
# %% read_from_state(state_out, state_map_in, 'matrix.0.1.nb') # %% # Read time def read_run(): for i in range(len(state_map_initial) - 1): read_from_state(state_out, state_map_in, state_map_initial[i]) # %% t_read = min(repeat(lambda: read_run(), number=1000, repeat=40)) t_read # %% # standard read time def read_run_standard(): state.foo state.bar state.nested.na state.nested.nb state.matrix[0][0].na state.matrix[0][0].nb state.matrix[0][1].na state.matrix[0][1].nb state.matrix[1][0].na
ranges_to_check = list(zip(len(ranges_to_check) * [n], ranges_to_check)) assert len(ranges_to_check) == nbr_processes results = pool.map(check_prime_in_range, ranges_to_check) if False in results: return False return True if __name__ == "__main__": NBR_PROCESSES = 4 pool = Pool(processes=NBR_PROCESSES) print("Testing with {} processes".format(NBR_PROCESSES)) for label, nbr in [ ("trivial non-prime", 112272535095295), ("expensive non-prime18_1", 100109100129100369), ("expensive non-prime18_2", 100109100129101027), # ("prime", 112272535095293)]: # 15 #("prime17", 10000000002065383)] ("prime18_1", 100109100129100151), ("prime18_2", 100109100129162907) ]: #("prime23", 22360679774997896964091)]: time_costs = timeit.repeat( stmt="check_prime({}, pool, {})".format(nbr, NBR_PROCESSES), repeat=20, number=1, setup="from __main__ import pool, check_prime") # print "check_prime reports:", check_prime(nbr, pool, NBR_PROCESSES) print("{:19} ({}) {: 3.6f}s".format(label, nbr, min(time_costs)))
return saved[(args, hashed)] saved[(args, hashed)] = func(*args, **kwargs) return saved[(args, hashed)] return new_func @myCache def fibs(n): '''Is this the only docstring now''' if n == 0: return 1 elif n == 1: return 1 else: return fibs(n - 1) + fibs(n - 2) setup_code = "from __main__ import fibs" stmt = "fibs(n=40)" times = repeat(setup=setup_code, stmt=stmt, repeat=3, number=3) print(min(times)) kwd_mark = object() # sentinel for separating args from kwargs # Used in actual functools.lru_cache codebase to cache dictionaries. def cached_call(*args, **kwargs): key = args + (kwd_mark, ) + tuple(sorted(kwargs.items())) return cache.get(key)
t2 = timeit.timeit(stmt="test2()", setup="from __main__ import test2", number=1000) t3 = timeit.timeit(stmt="test3()", setup="from __main__ import test3", number=1000) t4 = timeit.timeit(stmt="test4()", setup="from __main__ import test4", number=1000) print(t1) print(t2) print(t3) print(t4) t5 = timeit.repeat(stmt="test1()", setup="from __main__ import test1", number=1000, repeat=10) print(t5) print(sum(t5) / len(t5)) ''' t1 = Timer("test1()", "from __main__ import test1") print "concat %f second\n " % t1.timeit(number=1000) #print("concat {} second\n ".format(t1.timeit(number=1000))) t2 = Timer("test2()", "from __mian__ import test2") print "append %f second\n " % t2.timeit(number=1000) #print("append {} second\n ".format(t2.timeit(number=1000))) t3 = Timer("test3()", "from __main__ import test3") print "comprehension %f second\n " % t3.timeit(number=1000)
# Report the pure-Python fit, then compare against numpy and TensorFlow fits.
# NOTE(review): py_w, t0/t1, x, d, mu, N_epochs, np_descent etc. come from
# earlier in this file -- not visible here.
print(py_w)
print('Solve time: {:.2f} seconds'.format(round(t1 - t0, 2)))

# Numpy Fit ###########################################################
np_w = np_descent(x, d, mu, N_epochs)
print(np_w)

setup = ("from __main__ import x, d, mu, N_epochs, np_descent;"
         ";import numpy as np")
repeat = 5
number = 5  # Number of loops within each repeat
np_times = timeit.repeat('np_descent(x, d, mu, N_epochs)', setup=setup,
                         repeat=repeat, number=number)
# Best total time divided down to seconds per single descent run.
print(min(np_times) / number)

# Tensorflow Fit ###########################################################
import tensorflow as tf

# Tensorflow variables
X_tf = tf.constant(X, dtype=tf.float32, name="X_tf")
d_tf = tf.constant(d, dtype=tf.float32, name="d_tf")
tf_w = tf_descent(X_tf, d_tf, mu, N_epochs)
print(tf_w)
# Time every test_* method of each test class (setUp/test/tearDown per loop)
# and print them sorted fastest-first.
if __name__ == '__main__':
    import timeit
    from time import time
    s0 = time()
    all_times = []
    for classe in [TestContainer, TestPatio, TestPilha]:
        functions = [func for func in dir(classe) if 'test_' in func]
        for function in functions:
            # print(f'classe:{classe.__name__} function:{function}')
            setup_code = f"""
test = {classe.__name__}()
"""
            test_code = f"""
test.setUp()
test.{function}()
test.tearDown()
"""
            times = timeit.repeat(setup=setup_code, stmt=test_code,
                                  repeat=3, number=1000, globals=globals())
            all_times.append(
                (min(times),
                 f'Time {classe.__name__} {function} {min(times):0.4f}'))
    s1 = time()
    # Sort by best time so the cheapest tests print first.
    for _, descricao in sorted(all_times, key=lambda x: x[0]):
        print(descricao)
    print(f'Tempo total {s1 - s0:0.2f}')
from scipy.cluster.vq import kmeans

# - Memory: sample RSS while each implementation clusters hard_points
# (memory_usage and kmeans_cluster_assignment come from earlier in the file).
our_mem_usage = memory_usage((kmeans_cluster_assignment, (3, hard_points),
                              {'tolerance': 10e-5, 'max_iterations': 20}))
scipy_mem_usage = memory_usage((kmeans, (hard_points, 3)))
print(f"Mean memory usage of our implementations: {np.mean(our_mem_usage):.4f} MiB")
print(f"Mean memory usage of scipy implementations: {np.mean(scipy_mem_usage):.4f} MiB")

# - Speed
NUMBER = 100  # executions per repeat; used below to normalise to per-call time
our_timing = timeit.repeat(
    "kmeans_cluster_assignment(3, hard_points, tolerance=10e-5, max_iterations=20)",
    globals=globals(), repeat=7, number=NUMBER
)
scipy_timing = timeit.repeat(
    "kmeans(hard_points, 3)", globals=globals(), repeat=7, number=NUMBER
)
# BUG FIX: timeit.repeat returns the *total* seconds for `number` executions
# in each repeat, but the original printed those raw totals labelled "ms".
# Convert to milliseconds per single call before reporting.
our_ms = np.asarray(our_timing) / NUMBER * 1000.0
scipy_ms = np.asarray(scipy_timing) / NUMBER * 1000.0
print(f"Run time for our implementation: {np.mean(our_ms):.2f}+-{np.std(our_ms):.2f} ms")
print(f"Run time for scipy implementation: {np.mean(scipy_ms):.2f}+-{np.std(scipy_ms):.2f} ms")

# - Quality
our_cluster_assignments = kmeans_cluster_assignment(3, hard_points,
                                                    tolerance=10e-5,
                                                    max_iterations=20)
setup = """ from randomgen import Generator rg = Generator({prng}()) """ test = "rg.{func}" table = OrderedDict() for prng in PRNGS: print(prng.__name__) print('-' * 40) col = OrderedDict() for key in funcs: print(key) t = repeat(test.format(func=funcs[key]), setup.format(prng=prng().__class__.__name__), number=NUMBER, repeat=REPEAT, globals=globals()) col[key] = 1000 * min(t) print('\n' * 2) col = pd.Series(col) table[prng().__class__.__name__] = col npfuncs = OrderedDict() npfuncs.update(funcs) npfuncs['Uniform'] = f'random_sample(size={SIZE})' npfuncs['Uint64'] = f'randint(2**64, dtype="uint64", size={SIZE})' npfuncs['Uint32'] = f'randint(2**32, dtype="uint32", size={SIZE})' setup = """ from numpy.random import RandomState
oddList.tail = oddNode
appendval = appendval.next
orgList = LinkedList()
orgList.head = Node(1)
e2 = Node(2)
e3 = Node(3)
e4 = Node(4)
e5 = Node(5)
orgList.head.next = e2
e2.next = e3
e3.next = e4
e4.next = e5
evenList = LinkedList()
oddList = LinkedList()
orgList.appendInt()
"""
# The text above closes `tcode`, a string of setup-plus-exercise code that
# began before this view; timeit exec()s it twice and we report the extremes.
time_comp = timeit.repeat(stmt=tcode, repeat=2)
print('min time: ', min(time_comp), 'second')
print('max time: ', max(time_comp), 'second')
# t1/t2 are wall-clock timestamps taken earlier in the script.
print('actual duration: ', t2 - t1, 'sec')
# Space complexity:
# NOTE(review): sys.getsizeof is shallow -- it does not follow the Node
# references inside each list, so this understates real usage.
print(
    'space usage: ',
    sys.getsizeof(Node) + sys.getsizeof(LinkedList) + sys.getsizeof(orgList) +
    sys.getsizeof(evenList) + sys.getsizeof(oddList),
    'bytes')
# Compare pure-Python, cythonized-Python and hand-written Cython Fibonacci
# implementations (single- and double-precision variants).
from fib_py import fib_py
from fib_py_cy import fib_py_cy
from fib_cy import fib_cy
from fib_py_double import fib_py_double
from fib_py_cy_double import fib_py_cy_double
from fib_cy_double import fib_cy_double
import timeit
import sys

# pass in Fib number to calculate
n = int(sys.argv[1])

# time each version; best of 10 repeats of 100000 calls each.
# timeit disables garbage collection by default; 'gc.enable()' in the setup
# string re-enables it (the gc module is available in timeit's namespace).
t1 = min(
    timeit.repeat(f"fib_py({n})",
                  number=100000,
                  repeat=10,
                  setup="from fib_py import fib_py; gc.enable()"))
print(f'Pure python: answer = {fib_py(n)}, time = {t1}, speedup = 1.0')
t = min(
    timeit.repeat(f"fib_py_cy({n})",
                  number=100000,
                  repeat=10,
                  setup="from fib_py_cy import fib_py_cy; gc.enable()"))
print(
    f'Cythonized Python: answer = {fib_py_cy(n)}, time = {t}, speedup = {t1 / t}'
)
# (this call continues past the end of the visible chunk)
t = min(
    timeit.repeat(f"fib_cy({n})",
                  number=100000,
import timeit

# Setup strings for the two variants under test: math.pow (float result)
# versus the ** operator (exact int result).  Each defines fun() once.
SETUP_POW = '''
from math import pow

def fun():
    mylist = []
    for i in range(100):
        mylist.append(pow(i, i))
'''

SETUP_INT = '''
def fun():
    mylist = []
    for i in range(100):
        mylist.append(i ** i)
'''

# BUG FIX: the original passed the function *definition* as stmt, so timeit
# measured how long the `def fun(): ...` statement takes -- the loop body was
# never executed.  Move the defs into setup and time a call to fun().
# `number` is reduced from 1000000 because actually running the loop is far
# more expensive than merely defining it.
NUMBER = 1000

print(timeit.timeit(stmt='fun()', setup=SETUP_POW, number=NUMBER))
print(timeit.timeit(stmt='fun()', setup=SETUP_INT, number=NUMBER))
print(timeit.repeat(stmt='fun()', setup=SETUP_INT, number=NUMBER, repeat=3))
#
'''
Is a list comprehension twice as fast as a for loop?
'''
import timeit

# Setup code executed once per measurement (intentionally empty here).
NOT_REPITED_CODE = '''
'''
# Code under test: build a million-element list with append in a loop.
# Swap the comment below to benchmark the list-comprehension variant instead.
TESTED_CODE = '''
lst = []
for i in range(1000000):
    lst.append(i)

# lst = [i for i in range(1000000)]
'''
# Print the average of 5 (the default repeat count) measurements of
# number = 100 runs each.
# NOTE(review): the divisor 5 hard-codes repeat's default -- dividing by
# len() of the result would be safer; timeit's docs also recommend min()
# over the mean, since higher values reflect system noise.
print(sum(timeit.repeat(stmt=TESTED_CODE, setup=NOT_REPITED_CODE, number=100)) / 5)
# 1.876
# lst = []
# for i in range(100000):
#     lst.append(i)

# lst = [i for i in range(100000)]
# 0.959