def run(name, ctx, number=100000):
    """Benchmark each framework's rendering of template *name* with context *ctx*.

    Prints one table row per framework: milliseconds per render, renders per
    second, total profiled calls, and distinct functions hit.

    name -- template file name looked up inside each framework's directory
    ctx -- context object handed to the compiled template
    number -- timeit iteration count (default 100000)
    """
    sys.path[0] = '.'
    print("\n%-16s msec rps tcalls funcs" % name)
    for framework in frameworks:
        os.chdir(os.path.join(path, framework))
        if not os.path.exists(name):
            print("%-22s not available" % framework)
            continue
        try:
            main = __import__('app', None, None, ['main']).main
            render = main(name)

            def f():
                return render(ctx)

            f()  # warm up first call
            # Renamed from `time`, which shadowed the time/timeit import name.
            elapsed = timeit(f, number=number)
            st = Stats(profile.Profile().runctx('f()', globals(), locals()))
            print("%-16s %6.0f %6.0f %7d %6d" % (
                framework, 1000 * elapsed, number / elapsed,
                st.total_calls, len(st.stats)))
            # Removed a dead `if 0:` diagnostics branch that could never run.
            del sys.modules['app']
        except ImportError:
            print("%-22s not installed" % framework)
async def test_profiler_dump():
    """Check that the Profiler rewrites its dump file only once per interval.

    Two reads inside one 0.1s interval must see identical stats; a read after
    the interval has elapsed must see updated stats.
    """
    profiler = None
    # One temp file is enough: the original leaked the fd of a second,
    # anonymous NamedTemporaryFile and never removed the first one.
    tmp = NamedTemporaryFile(delete=False)
    path = tmp.name
    tmp.close()
    try:
        profiler = Profiler(
            interval=0.1,
            top_results=10,
            path=path,
        )
        await profiler.start()
        # Get first update
        await asyncio.sleep(0.01)
        stats1 = Stats(path)
        # Not enough sleep till next update
        await asyncio.sleep(0.01)
        stats2 = Stats(path)
        # Getting the same dump
        assert stats1.stats == stats2.stats
        # Enough sleep till next update
        await asyncio.sleep(0.2)
        stats3 = Stats(path)
        # Getting updated dump
        assert stats2.stats != stats3.stats
    finally:
        if profiler:
            await profiler.stop()
        os.remove(path)
def profile_run(name):
    """Profile docker.dock_structures() and write two sorted text reports.

    Produces one report sorted by internal time and one by cumulative time,
    both derived from the same cProfile data file.
    """
    datafile = f"TSCoDe_{name}_cProfile.dat"
    cProfile.run("docker.dock_structures()", datafile)
    # Same dump, two sort orders -> two report files.
    for sort_key in ("time", "cumtime"):
        with open(f"TSCoDe_{name}_cProfile_output_{sort_key}.txt", "w") as out:
            Stats(datafile, stream=out).sort_stats(sort_key).print_stats()
def profiled_wrapper(filename, name):
    """Profile a full embedding run and write two sorted text reports.

    Runs RunEmbedding under cProfile, then emits one report sorted by
    internal time and one by cumulative time from the same data file.
    """
    datafile = f"TSCoDe_{name}_cProfile.dat"
    cProfile.run("RunEmbedding(Embedder(filename, args.name))", datafile)
    # Same dump, two sort orders -> two report files.
    for sort_key in ("time", "cumtime"):
        with open(f"TSCoDe_{name}_cProfile_output_{sort_key}.txt", "w") as out:
            Stats(datafile, stream=out).sort_stats(sort_key).print_stats()
def _(*args, **kwargs):
    """Invoke the wrapped function *f* under cProfile; report on exit.

    Stats go to stdout when `to_stdout` is set, otherwise to a uniquely
    named `dpark_profile_*` temp file that is kept after the run.
    """
    prof = Profile()
    try:
        return prof.runcall(f, *args, **kwargs)
    finally:
        if to_stdout:
            stats = Stats(prof)
            print_stats(stats)
        else:
            # mode='w' is required: the default 'w+b' binary handle would
            # make print() and pstats (which write str) raise TypeError.
            with NamedTemporaryFile(mode='w', prefix='dpark_profile_',
                                    delete=False) as fd:
                print('===\n', datetime.today(), getpass.getuser(),
                      sys.argv[0], file=fd)
                stats = Stats(prof, stream=fd)
                print_stats(stats)
def profiler(check, log_msg: str = '', log: bool = True, kwargs: dict = None, sort: str = 'tottime'):
    """Run *check(**kwargs)* under cProfile; optionally log the top entries.

    check -- callable to profile
    log_msg -- prefix for the logged report
    log -- when True, send the cleaned report to web_log at level 8
    kwargs -- keyword arguments forwarded to *check* (None -> {})
    sort -- pstats sort key (default 'tottime')
    Returns whatever *check* returns.
    """
    # note: https://stackoverflow.com/questions/10326936/sort-cprofile-output-by-percall-when-profiling-a-python-script
    from cProfile import Profile
    from pstats import Stats
    from io import StringIO
    prof = Profile()  # renamed from `_`, which conventionally means "unused"
    prof.enable()
    if kwargs is None:
        kwargs = {}
    check_response = check(**kwargs)
    prof.disable()
    result = StringIO()
    Stats(prof, stream=result).sort_stats(sort).print_stats(DEBUG_PROFILE_TOP_N)
    # Drop the trailing blank line and the header noise (lines 1-4).
    cleaned_result = result.getvalue().splitlines()[:-1]
    del cleaned_result[1:5]
    cleaned_result = '\n'.join(cleaned_result)
    if log:
        web_log(output=f"{log_msg}:\n{cleaned_result}", level=8)
    return check_response
def get(self, profile_name):
    """Serve the visualization page for the profile dump named in the URL."""
    decoded = unquote_plus(profile_name)
    stats = Stats(decoded)
    self.render(
        'viz.html',
        profile_name=decoded,
        table_rows=table_rows(stats),
        callees=json_stats(stats),
    )
def __run(self, *args, **kwargs):
    """Run the wrapped callable under cProfile, logging slow starts/runs.

    Warns when the task waited too long in the queue before starting,
    when execution itself exceeds 0.8s, and always logs the top 10% of
    cumulative-time profile lines.
    """
    started = time.time()
    queue_delay = started - self.__time_submitted
    # Warn if the task sat in the queue for a noticeable time.
    if queue_delay > 0.05:
        self.log.warning(f'Starting of {self.name} took too long: {queue_delay:.2f}s. '
                         f'Maybe there are not enough threads?')

    prof = Profile()
    prof.enable()
    try:
        self._func(*args, **kwargs)
    except Exception as e:
        self.__format_traceback(e, *args, **kwargs)
    prof.disable()

    duration = time.time() - started
    if self.__warn_too_long and duration > 0.8:
        self.log.warning(f'Execution of {self.name} took too long: {duration:.2f}s')

    buf = io.StringIO()
    # print_stats(0.1) limits output to 10% of the profile lines.
    Stats(prof, stream=buf).sort_stats(SortKey.CUMULATIVE).print_stats(0.1)
    # Skip the 4 header lines (call counts, "Ordered by:" etc.).
    for line in buf.getvalue().splitlines()[4:]:
        if line:
            self.log.warning(line)
def starfish():
    """CLI entry point: print the banner and dispatch the chosen subcommand.

    When --profile is given, the subcommand runs under cProfile and the
    top PROFILER_LINES entries (by total time) are printed afterwards.
    """
    parser = build_parser()
    args, argv = parser.parse_known_args()
    art = """
         _              __ _     _
        | |            / _(_)   | |
     ___| |_ __ _ _ __| |_ _ ___| |__
    / __| __/ _` | '__|  _| / __| '_ `
    \\__ \\ || (_| | |  | | | \\__ \\ | | |
    |___/\\__\\__,_|_|  |_| |_|___/_| |_|
"""
    print(art)

    profiler = None
    if args.profile:
        profiler = cProfile.Profile()
        profiler.enable()

    if args.starfish_command is None:
        parser.print_help()
        parser.exit(status=2)
    args.starfish_command(args, len(argv) != 0)

    if args.profile:
        stats = Stats(profiler)
        stats.sort_stats('tottime').print_stats(PROFILER_LINES)
def print_stats(limit=limit, sort=sort, strip_dirs=strip_dirs):
    """Print the collected profile stats, if any were recorded.

    limit -- argument tuple forwarded to Stats.print_stats
    sort -- argument tuple forwarded to Stats.sort_stats
    strip_dirs -- when truthy, strip leading path info from file names
    """
    if _have_stats:
        stats = Stats(_profile)
        if strip_dirs:
            stats.strip_dirs()
        # apply() was removed in Python 3; use argument unpacking instead.
        stats.sort_stats(*sort)
        stats.print_stats(*limit)
def example_one():
    """Profile insertion_sort on 10k random ints and print cumulative stats.

    Sample run: ~20003 calls in ~0.778s, with nearly all of the cumulative
    time spent inside insert_value (the linear-scan insertion helper) —
    i.e. the profile pinpoints insert_value as the hot spot.
    """
    max_size = 10**4
    data = [randint(0, max_size) for _ in range(max_size)]

    profiler = Profile()
    profiler.runcall(lambda: insertion_sort(data))

    # Extract and print statistics about the profiled call via pstats.
    stats = Stats(profiler)
    stats.strip_dirs()
    stats.sort_stats('cumulative')
    stats.print_stats()
def tearDown(self):
    """Sort the collected profile by cumulative time; dump it when verbose."""
    stats = Stats(self.prof)
    stats.sort_stats("cumtime")
    if self.verbose:
        stats.dump_stats("profiles/test_graph.py.prof")
    stats.strip_dirs()
def __call__(self, environ, start_response):
    """WSGI middleware entry: profile the wrapped app for this request.

    Collects the response body, builds a table of the `limit` most
    expensive functions, hands both to `logwriter`, and returns either
    the logwriter's replacement response or the original body.
    """
    collected = []

    def _start_response(status, headers, exc_info=None):
        start_response(status, headers, exc_info)
        return collected.append

    def runapp():
        leftover = self.application(environ, _start_response)
        collected.extend(leftover)
        if hasattr(leftover, 'close'):
            leftover.close()

    prof = Profile()
    prof.runcall(runapp)
    body = b''.join(collected)

    buff = StringIO()
    # sort_stats(2) == sort by cumulative time (legacy numeric key).
    stats = Stats(prof, stream=buff).sort_stats(2)
    table = []
    for func in stats.fcn_list[:self.limit]:
        cc, nc, tt, ct, callers = stats.stats[func]
        table.append({
            'source': '{}@{}:{}'.format(func[2], func[0], func[1]),
            'call_count': nc,
            'total_time': ct,
        })

    custom_response = self.logwriter(environ, body, table)
    if custom_response:
        return [custom_response]
    return [body]
def stopTest(self, test):
    """After each test: if benchmarking, stop the profiler and print stats."""
    super(BenchTestResult, self).stopTest(test)
    if not self._benchmark:
        return
    self._profiler.disable()
    report = Stats(self._profiler)
    report.sort_stats(self._sort)
    report.print_stats(self._limit)
def expose(self, widget, event): context = widget.window.cairo_create() #r = (event.area.x, event.area.y, event.area.width, event.area.height) #context.rectangle(r[0]-.5, r[1]-.5, r[2]+1, r[3]+1) #context.clip() if False: import profile profile.runctx("self.draw(context, event.area)", locals(), globals(), "/tmp/pychessprofile") from pstats import Stats s = Stats("/tmp/pychessprofile") s.sort_stats('cumulative') s.print_stats() else: self.drawcount += 1 start = time() self.animationLock.acquire() self.draw(context, event.area) self.animationLock.release() self.drawtime += time() - start #if self.drawcount % 100 == 0: # print "Average FPS: %0.3f - %d / %d" % \ # (self.drawcount/self.drawtime, self.drawcount, self.drawtime) return False
def run(number=2):
    """Benchmark every allow-listed test_* function in this module.

    Prints, per test: milliseconds per run, runs per second, total profiled
    calls, and distinct functions hit.

    number -- timeit iteration count per test (default 2)
    """
    import profile
    from timeit import Timer
    from pstats import Stats
    # Collect all module-level test_* callables, sorted by name.
    names = globals().keys()
    names = sorted([(name, globals()[name])
                    for name in names if name.startswith('test_')])
    print(" msec rps tcalls funcs")
    for name, test in names:
        # Allow-list: only these benchmarks are currently enabled.
        if name not in (
                "test_hypergen",
                #"test_hypergen_real",
                "test_hypergen_real_py",
                "test_jinja2",
                "test_list_extend",
                "test_tenjin"):
            continue
        if name == "test_django":
            continue
        if test:
            #assert isinstance(test(), s)
            # Timer imports the test from __main__ by name; requires this
            # module to be the script entry point.
            t = Timer(setup='from __main__ import %s as t' % name, stmt='t()')
            t = t.timeit(number=number)
            # Separate profiling pass to count calls/functions.
            st = Stats(profile.Profile().runctx('test()', globals(), locals()))
            print('%-17s %7.2f %6.2f %7d %6d' % (
                name[5:], 1000 * t / number, number / t,
                st.total_calls, len(st.stats)))
        else:
            print('%-26s not installed' % name[5:])
def profilingStats():
    """Print the 'stats' cProfile dump (top 100 by cumtime) and delete it."""
    # Plain strings: the originals were f-strings with no placeholders (F541).
    print("# Profiling\n")
    p = Stats('stats')
    # NOTE(review): this replaces the Stats method with a module-level
    # print_stats function, which is why `p` is passed explicitly below —
    # confirm that is the intended calling convention.
    p.print_stats = print_stats
    p.strip_dirs().sort_stats('cumulative').print_stats(p, 100)
    os.remove('stats')
    print("# Other prints\n")
def write_profile(pfile='./logs/profile.out'):
    """Stop the global profiler and persist its results.

    Appends a cumulative-time text report to ./logs/profile.txt and dumps
    raw pstats data to ./logs/profiling.pstats. No-op if profiling is off.
    """
    global BUBBLE_PROFILE
    if not BUBBLE_PROFILE:
        return
    BUBBLE_PROFILE.disable()

    buf = StringIO()
    Stats(BUBBLE_PROFILE, stream=buf).sort_stats('cumulative').print_stats()

    pstats_file = './logs/profiling.pstats'
    profile_text = './logs/profile.txt'
    BUBBLE_PROFILE.dump_stats(pstats_file)
    with open(profile_text, 'a+') as pf:
        pf.write(buf.getvalue())

    print("end_profile")
    print('BUBBLE_PROFILE:pstats_file:' + pstats_file)
    print('BUBBLE_PROFILE:profile_text:' + profile_text)
def to_txt(self, filename):
    """
    Saves all profiles into one file.

    @param      filename        filename where to save the profiles, can be a stream
    """
    if len(self) == 0:
        raise ValueError(  # pragma: no cover
            "No profile was done.")
    # String path: open it and recurse once with the real stream.
    if isinstance(filename, str):
        with open(filename, "w") as f:  # pylint: disable=W1514
            self.to_txt(f)
        return
    f = filename
    f.write(self.name + "\n")
    # self iterates (profiler, kwargs) pairs; one framed section per profile.
    for i, (prof, kw) in enumerate(self):
        f.write("------------------------------------------------------\n")
        f.write("profile %d\n" % i)
        if kw:
            # Record the run's keyword arguments, one per line,
            # with newlines escaped so each entry stays on one line.
            for a, b in sorted(kw.items()):
                f.write("%s=%s\n" % (a, str(b).replace('\n', '\\n')))
            f.write("--\n")
        if hasattr(prof, 'output_text'):
            # pyinstrument-style profiler: it renders its own text report.
            f.write(prof.output_text(unicode=False, color=False))
        else:
            # cProfile-style profiler: render via pstats into a buffer.
            s = StringIO()
            sortby = SortKey.CUMULATIVE
            ps = Stats(prof, stream=s).sort_stats(sortby)
            ps.print_stats()
            f.write(s.getvalue())
        f.write("\n")
def main():
    """Conduct a peridynamics simulation, optionally under cProfile."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--profile', action='store_const', const=True)
    args = parser.parse_args()

    profile = cProfile.Profile() if args.profile else None
    if profile is not None:
        profile.enable()

    model = Model(mesh_file, horizon=0.1, critical_strain=0.005,
                  elastic_modulus=0.05, initial_crack=is_crack)

    # Boundary node sets: nodes within 1.5 horizons of either x-extreme.
    indices = np.arange(model.nnodes)
    model.lhs = indices[model.coords[:, 0] < 1.5*model.horizon]
    model.rhs = indices[model.coords[:, 0] > 1.0 - 1.5*model.horizon]

    integrator = Euler(dt=1e-3)
    u, damage, *_ = model.simulate(
        steps=100,
        integrator=integrator,
        boundary_function=boundary_function,
        write=1000
        )

    if profile is not None:
        profile.disable()
        buf = StringIO()
        Stats(profile, stream=buf).sort_stats(SortKey.CUMULATIVE).print_stats()
        print(buf.getvalue())
def profile():
    """Run f1 under cProfile and print cumulative-time statistics."""
    prof = Profile()
    prof.runcall(f1)
    # strip_dirs/sort_stats return self, so the report chains in one line.
    Stats(prof).strip_dirs().sort_stats('cumulative').print_stats()
def tearDown(self):
    """Print the top-50 cumulative profile entries when profiling is on."""
    if self.should_profile:
        report = Stats(self.profile)
        report.strip_dirs().sort_stats('cumulative').print_stats(50)
    super().tearDown()
def __call__(self, environ, start_response):
    """WSGI middleware entry: profile the wrapped app for this request.

    Runs the app under cProfile, then writes a framed, sorted report
    (including the request PATH_INFO) to the configured stream.
    """
    captured = []

    def catching_start_response(status, headers, exc_info=None):
        start_response(status, headers, exc_info)
        return captured.append

    def runapp():
        appiter = self._app(environ, catching_start_response)
        captured.extend(appiter)
        if hasattr(appiter, 'close'):
            appiter.close()

    prof = Profile()
    prof.runcall(runapp)
    # NOTE(review): ''.join implies the app yields str chunks; a Python 3
    # WSGI app yields bytes — confirm the target runtime before changing.
    body = ''.join(captured)

    report = Stats(prof, stream=self._stream)
    report.sort_stats(*self._sort_by)
    self._stream.write('-' * 80)
    self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
    report.print_stats(*self._restrictions)
    self._stream.write('-' * 80 + '\n\n')
    return [body]
def main():
    """Filter a pstats dump by filename globs and marshal it to stdout.

    Keeps only entries (and their callers) whose filenames pass the
    configured filters; optionally prints the surviving filenames first.
    """
    args = parse_arguments()

    # Compile every filename glob into a regular expression.
    filename_filters = [
        re.compile(fnmatch.translate(filter_glob))
        for filter_glob in args.filename_filter_glob_list
    ]
    if args.remove_garbage:
        filename_filters = [
            # Drop third-party / stdlib modules under lib/pythonX.Y.
            re.compile(r'.*lib[\\/]python\d?\.?\d?/.*'),
            # Drop synthetic or odd filenames.
            re.compile(r'~|<string>|<frozen .*>'),
        ] + filename_filters

    stats = Stats(args.profile_filename)
    # Each value is (ncalls, ccalls, tottime, cumtime, callers); keep the
    # entry and prune its callers by the same filename filters.
    filtered_stats = {
        key: (nc, cc, tt, ct, {
            caller_key: timing_tuple
            for caller_key, timing_tuple in iteritems(callers)
            if should_include_stats(caller_key, filename_filters)
        })
        for key, (nc, cc, tt, ct, callers) in iteritems(stats.stats)
        if should_include_stats(key, filename_filters)
    }

    if args.print_included_filenames:
        print_included_filenames(filtered_stats)
    # marshal is the on-disk format pstats itself reads back.
    marshal.dump(filtered_stats, sys.stdout.buffer)
def example_two():
    """Profile insertion_sort on 10k random ints and print cumulative stats.

    Sample run: ~30003 calls in ~0.018s — insert_value_better (bisect-based)
    spends nearly all its time in list.insert and bisect_left, i.e. the
    Python-level hot spot from example_one is gone.
    """
    max_size = 10**4
    data = [randint(0, max_size) for _ in range(max_size)]

    profiler = Profile()
    profiler.runcall(lambda: insertion_sort(data))

    # Extract and print statistics about the profiled call via pstats.
    stats = Stats(profiler)
    stats.strip_dirs()
    stats.sort_stats('cumulative')
    stats.print_stats()
def prof_wrapper(*args, **kwargs):
    """Run the wrapped function under cProfile, dumping TIME-sorted stats.

    The dump lands in profiles/<funcname>_<first arg>.prof; the wrapped
    function's return value is passed through unchanged.
    """
    with cProfile.Profile() as pr:
        result = profunc(*args, **kwargs)
    report = Stats(pr)
    report.sort_stats(SortKey.TIME).dump_stats(
        f"profiles/{profunc.__name__}_{args[0]}.prof")
    return result
def tearDown(self):
    """Report profiling results collected in self.pr after each test."""
    p = Stats(self.pr)
    p.strip_dirs()
    p.sort_stats('cumtime')
    p.print_stats()
    # Fixed: Python 2 `print "..."` statement (a SyntaxError on Python 3).
    print("\n--->>>")
def new_func(*args, **kwargs):
    """Call the wrapped func under cProfile and print a labelled report.

    The label is the dotted path of the defining file relative to the
    enclosing package, followed by the function name.
    """
    prof = Profile()
    prof.enable()
    result = func(*args, **kwargs)
    prof.disable()

    # Render the stats into a string buffer instead of a real file.
    buff = StringIO()
    Stats(prof, stream=buff).print_stats()

    print('-' * 79)
    # Trim the file path to start at the last occurrence of the package dir.
    path = func.__code__.co_filename.split(sep)
    path = path[len(path) - list(reversed(path)).index(package) - 1:]
    print(f'{".".join(path)} :: '
          f'{func.__name__}')
    print(buff.getvalue())

    # Propagate back the real func's output.
    return result
def main():
    """Main sequence: warm-up run, then CPU profile + memory snapshot of test()."""
    # Warm-up / setup pass, released before measurement.
    analyser = Analyser(config=ProfilingConfig)
    data = import_all(config=ProfilingConfig)
    analyser.run(data)
    del analyser
    del data

    profiler = Profile()
    tracemalloc.start(10)
    snap_before = tracemalloc.take_snapshot()
    profiler.runcall(test)
    snap_after = tracemalloc.take_snapshot()

    cpu_stats = Stats(profiler)
    cpu_stats.strip_dirs()
    cpu_stats.sort_stats('cumulative')
    print("\n===Time Profiler Stats===\n")
    cpu_stats.print_stats(TOP_STATS)
    print("\n===Time Profiler Callers===\n")
    cpu_stats.print_callers(TOP_STATS)

    memory_stats = snap_after.compare_to(snap_before, 'lineno')
    print("\n===Memory Profiler Callers===\n")
    for stat in memory_stats[:3]:
        print(stat)
    print("\n===Top Memory Consumer===\n")
    top = memory_stats[0]
    print('\n'.join(top.traceback.format()))
def viz_handler(request):
    """aiohttp handler: render viz.html from the global profiler's stats.

    Pauses the module-level profiler `pr` while its data is dumped to a
    temp file and re-read for rendering; the profiler is always re-enabled.
    """
    pr.disable()  # pause the global profiler while its stats are read
    try:
        real_path = str(request.url)
        # Honor a reverse-proxy-supplied base URL, if present.
        if 'X-VirtualHost-Monster' in request.headers:
            base_url = request.headers['X-VirtualHost-Monster']
            if base_url[-1] == '/':
                base_url = base_url[:-1]
            real_path = base_url + request.path
        sio = io.StringIO()
        ps = pstats.Stats(pr, stream=sio)
        # Round-trip through a temp file so Stats can re-load a clean dump.
        temp = tempfile.NamedTemporaryFile()
        ps.dump_stats(temp.name)
        s = Stats(temp.name)
        temp.close()
        context = {
            'table_rows': table_rows(s),
            'callees': json_stats(s),
            'profile_name': temp.name,
            'path': real_path
        }
        response = aiohttp_jinja2.render_template('viz.html', request, context)
    except Exception as exc:
        # Fixed: the original bare `except:` raised with an undefined
        # `profile_name` (a NameError masking the real failure); chain
        # the actual cause instead.
        raise RuntimeError('Could not read profile stats.') from exc
    finally:
        pr.enable()
    return response