def line_profile(items):
    """A context manager which prints a line-by-line profile for the given
    functions, modules, or module names while execution is in its context.

    Example:
        with line_profile(__name__, Class.some_function, some_module):
            do_something()

    NOTE(review): this is a generator; it presumably carries a
    ``@contextlib.contextmanager`` decorator at its definition site — confirm.
    """
    from line_profiler import LineProfiler
    prof = LineProfiler()
    for item in items:
        if inspect.isfunction(item):
            prof.add_function(item)
        elif inspect.ismodule(item):
            prof.add_module(item)
        elif isinstance(item, str):
            # BUG FIX: the original did ``sys.modules[str]`` — indexing with
            # the builtin ``str`` type, which can never be a module key.
            prof.add_module(sys.modules[item])
        else:
            raise TypeError(
                'Inputs must be functions, modules, or module names')
    prof.enable()
    try:
        yield
    finally:
        # Always stop profiling and emit the report, even when the body of
        # the ``with`` block raises (the original skipped both on error).
        prof.disable()
        prof.print_stats()
def profile_list_serialization(serializer, child_serializer, instances_list):
    """Line-profile serializing ``instances_list`` with ``serializer``.

    No-op unless running on CI (``CI=true``) with line_profiler available.
    Prints the per-line stats for ``to_representation`` of both the list
    serializer and its child serializer.
    """
    if os.environ.get('CI', None) != 'true' or not LineProfiler:
        return
    # BUG FIX: the original registered ``.instances_list`` attributes (the
    # data) with LineProfiler instead of the serialization methods it then
    # invokes; mirror the deserialization profiler, which registers
    # ``to_internal_value``.
    profile = LineProfiler(serializer.to_representation,
                           child_serializer.to_representation)
    profile.enable()
    serializer.to_representation(instances_list)
    profile.disable()
    profile.print_stats()
def profile_list_deserialization(serializer, child_serializer, data_list):
    """Line-profile deserializing ``data_list`` with ``serializer``.

    Only active on CI (``CI=true``) when line_profiler is importable;
    otherwise returns immediately.
    """
    running_on_ci = os.environ.get('CI', None) == 'true'
    # Short-circuit keeps ``LineProfiler`` unevaluated when not on CI.
    if not (running_on_ci and LineProfiler):
        return
    profiler = LineProfiler(serializer.to_internal_value,
                            child_serializer.to_internal_value)
    profiler.enable()
    serializer.to_internal_value(data_list)
    profiler.disable()
    profiler.print_stats()
def profileTimeLine(func, args, kw_args):
    """Execute ``func(*args, **kw_args)`` under line_profiler.

    Writes binary stats to 'lpstats' and a human-readable report to
    'lineprofstats.txt'.
    """
    from line_profiler import LineProfiler

    profiler = LineProfiler(func)
    # Extra functions can be registered here when needed, e.g.:
    # from . import util; profiler.add_function(util.loadYml)
    profiler.enable()
    func(*args, **kw_args)
    profiler.disable()

    profiler.dump_stats('lpstats')
    with open('lineprofstats.txt', 'w') as report:
        profiler.print_stats(report)
def test_enable_profile_all(self):
    """enable_profile_all() should trace every function run while enabled."""
    profiler = LineProfiler()
    profiler.enable_profile_all()
    profiler.enable()
    result = f(10)
    profiler.disable()
    # Profiling must not alter the computed value.
    self.assertEqual(result, f(10))
    # Exactly one function was captured, with two lines recorded for f.
    self.assertEqual(len(profiler.code_map.keys()), 1)
    self.assertEqual(len(profiler.code_map[f.__code__]), 2)
def profile_on(fcn_names=None):
    """Create, enable, and return a profiler.

    Uses LineProfiler over ``fcn_names`` when given and line_profiler is
    available; otherwise falls back to cProfile.
    """
    # Short-circuit: HAS_LINE_PROFILER is only consulted when names were given.
    if fcn_names and HAS_LINE_PROFILER:
        profiler = LineProfiler()
        for target in fcn_names:
            # NOTE(review): add_function expects function objects, but the
            # parameter name suggests strings — confirm what callers pass.
            profiler.add_function(target)
    else:
        profiler = cProfile.Profile()
    profiler.enable()
    return profiler
def process(cell_id):
    """Generate the GCN file for ``cell_id``, optionally line-profiled.

    When the 'profile' DB parameter is truthy, ``create_gcn_file`` is run
    under LineProfiler and the stats are printed.
    """
    profiling = strtobool(get_param(DbName.COMMON, 'profile')[0])
    if not profiling:
        converter = RdfToGcnf(cell_id)
        converter.create_gcn_file()
        del converter
        return

    print('cell id : {}'.format(cell_id))
    profiler = LineProfiler()
    converter = RdfToGcnf(cell_id)
    profiler.add_function(converter.create_gcn_file)
    profiler.enable()
    converter.create_gcn_file()
    profiler.disable()
    profiler.print_stats()
    del converter
def line(
        cls_runner: tp.Type[Perf],
        pattern_func: str,
) -> None:
    """Line-profile each perf function matching ``pattern_func``.

    Instantiates ``cls_runner`` once and, for every matching function name,
    profiles the line target declared in ``runner.meta`` while calling the
    bound function.
    """
    runner = cls_runner()
    for func_name in runner.iter_function_names(pattern_func):
        bound = getattr(runner, func_name)
        profiler = LineProfiler()
        if not runner.meta:
            raise NotImplementedError('must define runner.meta')
        profiler.add_function(runner.meta[func_name].line_target)
        profiler.enable()
        bound()
        profiler.disable()
        profiler.print_stats()
def main():
    """Profile every function_runner variant.

    cProfile covers a pass over all variants; LineProfiler covers the
    'current' implementation; finally each variant is timed with timeit.
    """
    steps = [
        ('Original', 'original_method'),
        ('One', 'step_one'),
        ('Two', 'step_two'),
        ('Three', 'step_three'),
        ('Four', 'step_four'),
        ('Five', 'step_five'),
        ('Six', 'step_six'),
        ('Seven', 'step_seven'),
        ('Eight', 'step_eight'),
        ('Nine', 'step_nine'),
        ('Current', 'current'),
    ]

    profiler = cProfile.Profile()
    profiler.enable()
    for _, method in steps:
        function_runner(method)
    profiler.disable()
    profiler.dump_stats('function_event.stats')

    line_profiler = LineProfiler(CurrentFunctionContainer().current)
    line_profiler.enable()
    function_runner('current')
    line_profiler.disable()
    line_profiler.dump_stats('function_event.line_stats')
    line_profiler.print_stats()

    # BUG FIX: the original used Python 2 ``print`` statements, which are a
    # SyntaxError on Python 3; print() with two arguments emits the same
    # "<label> <time>" output.  ``method=method`` binds per iteration to
    # avoid the late-binding-closure pitfall.
    for label, method in steps:
        print(label, timeit.timeit(
            lambda method=method: function_runner(method), number=7))
def run_profiling(args):
    """Run the recorded ``operations`` under profilers and write reports.

    Always line-profiles a fixed set of API functions; optionally also runs
    a sampling stack profiler (``args.stack``).  Reports go to stdout
    (``args.print``), ``args.output``, and optionally ``args.output_html``.
    """
    lprofiler = LineProfiler()
    # Typo fixed: monitor_fuctions -> monitor_functions (local name only).
    monitor_functions = [
        api.problem.submit_key,
        api.problem.get_unlocked_pids,
        api.problem.get_solved_pids,
        api.problem.get_all_problems,
        api.problem.get_solved_problems,
        api.stats.get_score,
        api.cache.memoize,
        api.autogen.grade_problem_instance,
        api.autogen.get_problem_instance,
        api.autogen.get_number_of_instances,
    ]
    for func in monitor_functions:
        lprofiler.add_function(func)

    lprofiler.enable()
    if args.stack:
        profiler = Profiler(use_signal=False)
        profiler.start()
    for func, a, kw in operations:
        func(*a, **kw)
    if args.stack:
        profiler.stop()
    lprofiler.disable()

    if args.print:
        # NOTE(review): ``profiler`` only exists when ``args.stack`` was set;
        # --print without --stack would raise NameError — confirm CLI rules.
        print(profiler.output_text(unicode=True, color=True))
        lprofiler.print_stats()

    # BUG FIX: use context managers so the report files are closed even if a
    # write fails (the original leaked the handles on error).
    with open(args.output, "w") as output:
        if args.stack:
            output.write(profiler.output_text(unicode=True))
            if args.output_html is not None:
                with open(args.output_html, "w") as output_html:
                    output_html.write(profiler.output_html())
                print("Wrote test info to " + args.output_html)
        lprofiler.print_stats(output)
    print("Wrote test info to " + args.output)
def run_profiling(args):
    """Run the recorded ``operations`` under profilers and write reports.

    Always line-profiles a fixed set of API functions; optionally also runs
    a sampling stack profiler (``args.stack``).  Reports go to stdout
    (``args.print``), ``args.output``, and optionally ``args.output_html``.
    """
    lprofiler = LineProfiler()
    # Typo fixed: monitor_fuctions -> monitor_functions (local name only).
    monitor_functions = [api.problem.submit_key,
                         api.problem.get_unlocked_pids,
                         api.problem.get_solved_pids,
                         api.problem.get_all_problems,
                         api.problem.get_solved_problems,
                         api.stats.get_score,
                         api.cache.memoize,
                         api.autogen.grade_problem_instance,
                         api.autogen.get_problem_instance,
                         api.autogen.get_number_of_instances]
    for func in monitor_functions:
        lprofiler.add_function(func)

    lprofiler.enable()
    if args.stack:
        profiler = Profiler(use_signal=False)
        profiler.start()
    for func, a, kw in operations:
        func(*a, **kw)
    if args.stack:
        profiler.stop()
    lprofiler.disable()

    if args.print:
        # NOTE(review): ``profiler`` only exists when ``args.stack`` was set;
        # --print without --stack would raise NameError — confirm CLI rules.
        print(profiler.output_text(unicode=True, color=True))
        lprofiler.print_stats()

    # BUG FIX: use context managers so the report files are closed even if a
    # write fails (the original leaked the handles on error).
    with open(args.output, "w") as output:
        if args.stack:
            output.write(profiler.output_text(unicode=True))
            if args.output_html is not None:
                with open(args.output_html, "w") as output_html:
                    output_html.write(profiler.output_html())
                print("Wrote test info to " + args.output_html)
        lprofiler.print_stats(output)
    print("Wrote test info to " + args.output)
def _perf_hot_spot():
    """Line-profile Comp.forward/backward over randomly generated nets."""
    try:
        from line_profiler import LineProfiler
    except ImportError:
        print(
            'WARNING: Unable to import line_profiler, skipping '
            'detailed performance checking', file=stderr)
        return

    for vert_c_f in COMP_VERT_C_F:
        # Build one net deterministically from a fixed seed.
        with FixedSeed(0x4711):
            nets, Model, Comp = _gen(1, vert_c_f)
        net = nets[0]
        model = Model()

        # Register the component's forward and backward passes.
        lp = LineProfiler()
        lp.add_function(Comp.forward)
        lp.add_function(Comp.backward)
        lp.enable()

        # Repeated passes accumulate per-line statistics.
        gradient = model.gradient()
        for _ in range(32):
            net.forward(model)
            net.backward(model, gradient=gradient)
        lp.print_stats()

    # Optional finite-difference sanity check; disabled by default.
    sanity = False
    if sanity:
        from .fdiff import fdiff_check
        fdiff_check(model, net)
grid[:, -10:, :] = fdtd.PML(name="pml_yhigh") # z boundaries grid[:, :, 0] = fdtd.PeriodicBoundary(name="zbounds") # objects grid[11:32, 30:84, 0:1] = fdtd.AnisotropicObject(permittivity=2.5, name="object") print(grid) print(f"courant number: {grid.courant_number}") # create and enable profiler profiler = LineProfiler() profiler.add_function(grid.update_E) profiler.enable() # run simulation grid.run(50, progress_bar=False) # print profiler summary profiler.print_stats() ## Plots # Fields if True: fig, axes = plt.subplots(2, 3, squeeze=False) titles = ["Ex: xy", "Ey: xy", "Ez: xy", "Hx: xy", "Hy: xy", "Hz: xy"] fields = bd.stack([
class SpecialTestRunner(SpecialTest):
    """
    Test runner, calls the specified test under specified profiler
    Mode = None - no profiler, "c" - cProfile, "l" - LineProfiler, "h" - hotshot
    """

    def __init__(self, test, mode=None):
        # test: the SpecialTest instance to execute.
        # mode: profiler selector — None / 'c' / 'l' / 'h' (see class docstring).
        super(SpecialTestRunner, self).__init__()
        self.mode = mode
        self.test = test
        self.profiler = None  # created lazily in setup()

    def setup(self):
        # Instantiate the profiler chosen by self.mode, then delegate to the
        # wrapped test's own setup.
        if self.mode == 'c':
            import cProfile
            self.profiler = cProfile.Profile()
        elif self.mode == 'l':
            from line_profiler import LineProfiler
            self.profiler = LineProfiler()
        elif self.mode == 'h':
            import hotshot
            # hotshot profiles into a file; its name is kept for stats loading.
            self.info['name'] = 'special.prof'
            self.profiler = hotshot.Profile(self.info['name'])
        self.test.setup()

    def run(self):
        # Start profiling.  LineProfiler needs its target functions registered
        # explicitly; enable_by_count() tracks them across nested calls.
        if self.mode == 'c':
            self.profiler.enable()
        elif self.mode == 'l':
            self.profiler.enable_by_count()
            self.profiler.add_function(Handler.handle)
            self.profiler.add_function(Condition.check_string_match)
            self.profiler.add_function(Condition.check_function)
            self.profiler.add_function(Condition.check_list)
        t = Timer()
        # Run itself
        # hotshot drives the call via runcall(); other modes call run() directly.
        if self.mode == 'h':
            self.profiler.runcall(self.test.run)
        else:
            self.test.run()
        print('Test time: %s' % t.delta())
        if self.mode == 'c':
            # NOTE(review): ``import StringIO`` is Python 2-only (io.StringIO
            # on Python 3) — confirm the target interpreter.
            import pstats
            import StringIO
            self.profiler.disable()
            sio = StringIO.StringIO()
            ps = pstats.Stats(self.profiler, stream=sio).sort_stats('time')
            ps.print_stats()
            print(sio.getvalue())
        elif self.mode == 'h':
            import hotshot.stats
            print('Processing results...')
            self.profiler.close()
            name = self.info['name']
            stats = hotshot.stats.load(name)
            stats.strip_dirs()
            stats.sort_stats('time', 'calls')
            stats.print_stats(50)
            print('Run "hotshot2calltree -o %s.out %s" to generate the cachegrind file' % (name, name))
        elif self.mode == 'l':
            self.profiler.disable()
            self.profiler.print_stats()
# Script: line-profile BLR (Bayesian linear regression) hot spots while
# running trend-surface estimation on a test dataset.
import sys
sys.path.append('/home/preclineu/andmar/sfw/nispat/nispat')
from trendsurf import estimate
from line_profiler import LineProfiler
from bayesreg import BLR

# with test covariates
# NOTE(review): ``os`` is used below but not imported in this chunk —
# presumably imported earlier in the file; confirm.
wdir = '/home/preclineu/andmar/py.sandbox/unittests'
maskfile = os.path.join(wdir, 'mask.nii.gz')
datafile = os.path.join(wdir, 'spect_data2_first5.nii.gz')
basis = os.path.join(wdir, 'bfs/icp_basis_s8.nii.gz')

# Profile the likelihood, its gradient, and the posterior computation while
# estimate() runs over the test volume.
lp = LineProfiler(BLR.loglik)
lp.add_function(BLR.dloglik)
lp.add_function(BLR.post)
lp.enable()
estimate(datafile, maskfile, basis)
lp.disable()
lp.print_stats()

# to profile, can also put the following code in trendsurf.py
# lp = LineProfiler(BLR.loglik)
# lp = LineProfiler(BLR.dloglik)
# lp.add_function(BLR.post)
# lp.enable()
# hyp[i, :] = breg.estimate(hyp0, Phi, Yz[:, i])
# lp.disable()
# lp.print_stats()
class SpecialTestRunner(SpecialTest):
    """
    Test runner, calls the specified test under specified profiler
    Mode = None - no profiler, "c" - cProfile, "l" - LineProfiler, "h" - hotshot
    """

    def __init__(self, test, mode=None):
        # test: the SpecialTest instance to execute.
        # mode: profiler selector — None / 'c' / 'l' / 'h' (see class docstring).
        super(SpecialTestRunner, self).__init__()
        self.mode = mode
        self.test = test
        self.profiler = None  # created lazily in setup()

    def setup(self):
        # Instantiate the profiler chosen by self.mode, then delegate to the
        # wrapped test's own setup.
        if self.mode == 'c':
            import cProfile
            self.profiler = cProfile.Profile()
        elif self.mode == 'l':
            from line_profiler import LineProfiler
            self.profiler = LineProfiler()
        elif self.mode == 'h':
            import hotshot
            # hotshot profiles into a file; its name is kept for stats loading.
            self.info['name'] = 'special.prof'
            self.profiler = hotshot.Profile(self.info['name'])
        self.test.setup()

    def run(self):
        # Start profiling.  LineProfiler needs its target functions registered
        # explicitly; enable_by_count() tracks them across nested calls.
        if self.mode == 'c':
            self.profiler.enable()
        elif self.mode == 'l':
            self.profiler.enable_by_count()
            self.profiler.add_function(Handler.handle)
            self.profiler.add_function(Condition.check_string_match)
            self.profiler.add_function(Condition.check_function)
            self.profiler.add_function(Condition.check_list)
        t = Timer()
        # Run itself
        # hotshot drives the call via runcall(); other modes call run() directly.
        if self.mode == 'h':
            self.profiler.runcall(self.test.run)
        else:
            self.test.run()
        print('Test time: %s' % t.delta())
        if self.mode == 'c':
            # NOTE(review): ``import StringIO`` is Python 2-only (io.StringIO
            # on Python 3) — confirm the target interpreter.
            import pstats
            import StringIO
            self.profiler.disable()
            sio = StringIO.StringIO()
            ps = pstats.Stats(self.profiler, stream=sio).sort_stats('time')
            ps.print_stats()
            print(sio.getvalue())
        elif self.mode == 'h':
            import hotshot.stats
            print('Processing results...')
            self.profiler.close()
            name = self.info['name']
            stats = hotshot.stats.load(name)
            stats.strip_dirs()
            stats.sort_stats('time', 'calls')
            stats.print_stats(50)
            print(
                'Run "hotshot2calltree -o %s.out %s" to generate the cachegrind file' % (name, name))
        elif self.mode == 'l':
            self.profiler.disable()
            self.profiler.print_stats()
# NOTE(review): this chunk began mid-docstring; the text below is the tail of
# a line_profiler output sample whose opening quotes are outside this view.
# Reconstructed here as a closed string literal so the unit is well-formed.
"""
29 return True
30
31 1 2.0 2.0 0.0 return False
"""

"""
O(N*(N/2))
"""


def contains_duplicates(vec: np.ndarray) -> bool:
    """Return True if any value occurs more than once in ``vec``.

    Deliberately naive O(n^2) pairwise scan (kept as the profiling subject).
    """
    # BUG FIX: np.alen() was deprecated in NumPy 1.18 and removed in 1.23;
    # len() is the supported equivalent for 1-D arrays.
    vec_len: int = len(vec)
    for i in range(vec_len - 1):
        i_scalar = vec[i]
        for j in range(i + 1, vec_len):
            if i_scalar == vec[j]:
                return True
    return False


if __name__ == '__main__':
    vec = np.arange(1, 10001)
    p = LineProfiler(contains_duplicates)
    p.enable()
    # %time -> Wall time: 6.5 s
    contains_duplicates(vec)
    p.print_stats()
    # NOTE(review): chunk begins mid-function — the enclosing ``def`` (its
    # name and parameter ``num`` are outside this view) computes a
    # factorial-style product.
    count = 1
    for i in range(num):
        count *= i + 1
    return count


if __name__ == '__main__':
    num = 10000
    lp = LineProfiler()
    lp.add_function(test)  # register the function under test
    lp_wrapper = lp(myFunc)
    lp_wrapper(num)
    lp.print_stats()

    lp2 = LineProfiler(test)
    lp2.enable()  # start line-by-line profiling
    # NOTE(review): test() is called without arguments although the loop
    # above reads ``num`` — confirm the function's signature.
    test()
    lp2.disable()  # stop line-by-line profiling
    lp2.print_stats()

# ### line_profiler
# PyPI: https://pypi.org/project/line_profiler/
# Reports per-line hit counts and execution times; times are in microseconds.
#
# ### Interpreting the output
# - Total Time: total running time of the profiled code
# - File: path of the source file
# - Function: the line number of the function
# - Line: the line number of each line of code
# - Hits: how many times each line was executed
# - Time: the execution time of each line