def profile_lines(self, functions, statement):
    from line_profiler import LineProfiler
    import __builtin__

    profile = LineProfiler(*functions)
    # Add the profiler to the builtins for @profile.
    # This will probably not work for all modules in ecoControl,
    # as they are already imported before and @profile will then throw an error.
    if 'profile' in __builtin__.__dict__:
        had_profile = True
        old_profile = __builtin__.__dict__['profile']
    else:
        had_profile = False
        old_profile = None
    __builtin__.__dict__['profile'] = profile

    try:
        try:
            profile.runctx(statement, globals(), locals())
            message = ''
        except SystemExit:
            message = """*** SystemExit exception caught in code being profiled."""
        except KeyboardInterrupt:
            message = ("*** KeyboardInterrupt exception caught in code being "
                       "profiled.")
    finally:
        if had_profile:
            __builtin__.__dict__['profile'] = old_profile

    # Trap text output.
    stdout_trap = StringIO()
    profile.print_stats(stdout_trap)
    output = stdout_trap.getvalue()
    output = output.rstrip()

    pfile = open("profile.txt", 'a')
    pfile.write("\n\n" + 20 * "=" +
                "*********====================== profile at time " +
                time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) +
                "==================================\n\n")
    pfile.write(output)
    pfile.close()
    print '\n*** Profile printout saved to text file profile.txt', message
def test_single_function(self):
    # Profile only if LineProfiler present.
    # To install: conda install line_profiler
    try:
        from line_profiler import LineProfiler
    except ImportError:
        return

    function_to_perf = functions.subtract_background

    n_iterations = 200
    n_tests = 5

    simulated_camera = CameraSimulation(CameraConfig("simulation"),
                                        size_x=2048, size_y=2048)

    for _ in range(n_tests):
        profile = LineProfiler()
        wrapped_function = profile(function_to_perf)

        images = []
        backgrounds = []

        for _ in range(n_iterations):
            images.append(simulated_camera.get_image())
            backgrounds.append(simulated_camera.get_image())

        for index in range(n_iterations):
            wrapped_function(images[index], backgrounds[index])

        profile.print_stats()
def test_given_a_text_column_when_profiler_is_applied_with_high_level_analysis_then_it_finishes_quick():
    # given
    TARGET_PROFILE_REPORT_FOLDER = '.cprofile/'
    if not os.path.exists(TARGET_PROFILE_REPORT_FOLDER):
        os.makedirs(TARGET_PROFILE_REPORT_FOLDER)
    profile = LineProfiler()
    source_data = generate_data()
    expected_execution_time = 32  # benchmarked: (first-time) 31.051079034805298, (cached) 0.918392 seconds

    # when: using default method (joblib Parallel) for parallelisation
    start_execution_time = time()
    profile_wrapper = profile(spelling_quality_score)
    for each in source_data:
        profile_wrapper(each)
    end_execution_time = time()
    actual_execution_time = end_execution_time - start_execution_time

    short_sha = shorten_sha(git_current_head_sha())
    output_filename = f'{TARGET_PROFILE_REPORT_FOLDER}/spelling_quality_check-' \
                      f'{datetime.now().strftime("%d-%m-%Y-%H-%M-%S")}-{short_sha}'
    with open(f'{output_filename}.txt', 'w') as file:
        with redirect_stdout(file):
            profile.print_stats()
    profile.dump_stats(f'{output_filename}.lprof')

    # then
    assert actual_execution_time <= expected_execution_time, \
        f"Expected duration: {expected_execution_time}, Actual duration: {actual_execution_time}. " \
        f"Slow down by: {abs(actual_execution_time - expected_execution_time)} seconds. " \
        f"We have crossed the benchmark limit after a speed up via commit a81ed70."
def execute_commands(self, commands, profile_performance=False):
    if profile_performance:
        try:
            from line_profiler import LineProfiler
        except ImportError as e:
            raise ImportError(
                'You need to install the line_profiler package.') from e
        profiler = LineProfiler()
    else:
        profiler = None

    commands = self.__parse_commands(commands)

    for command in commands:
        method = command.method
        if profiler:
            method = profiler(method)

        if command.num_args == 0:
            method()
        elif command.num_args == 1:
            method(command.args)
        else:
            method(*command.args)

    if profiler:
        profiler.print_stats()
def wrapper(*args, **kwargs):
    from line_profiler import LineProfiler
    prof = LineProfiler()
    try:
        return prof(func)(*args, **kwargs)
    finally:
        prof.print_stats()
def test_staircase_stats_profile(self):
    """
    How to use line profiler in a script?
    https://stackoverflow.com/a/43377717/6610243

    Returns
    -------

    """
    return  # disabled by default; remove this return to run the profiling test

    sequence = [0, 0, 1, 1, 1, 0, 0, 0, 1, 0]
    force_terminate = [0, 1, 0, 0, 0, 0, 1, 0, 0, 0]

    try:
        from line_profiler import LineProfiler
    except ImportError:
        return

    # Baseline: Total time: 2.07796 s for N = 10000
    # Numpy: Total time: 2.11567 s
    # Combined 2 x n array: Total time: 1.5246 s
    # Total time: 1.19332 s
    # Remove creating temporary termination array for terminate == None: Total time: 1.19105 s
    # Switching 0 and 1 cases: Total time: 1.14139 s
    # Switching to: >2, 1, 0: Total time: 1.10141 s

    lp = LineProfiler()
    lp.add_function(strategy.DoubleStaircaseStrategy.get_staircase_stats)
    lp_wrapper = lp(lambda *args, **kwargs: [
        strategy.DoubleStaircaseStrategy.get_staircase_stats(*args, **kwargs)
        for _ in range(10000)
    ])
    lp_wrapper(sequence, step=(4, 2), force_terminate=force_terminate)
    with open("test_staircase_stats_profile.py", "w") as f:
        lp.print_stats(stream=f, output_unit=1e-3)
def wrapper(self, f, *args, **kwargs):
    # memory_profiler
    with StringIO() as s:
        rtn = profile(f, stream=s, precision=2)(*args, **kwargs)
        memory_value = self._memory_profiler_parse(s.getvalue())

    # line_profiler
    prof = LineProfiler()
    prof.add_function(f)
    rtn = prof.runcall(f, *args, **kwargs)
    with StringIO() as s:
        prof.print_stats(stream=s)
        mix, line_tmp = self._line_profiler_parse(s.getvalue())

    # memory line mix output
    template = self.L_M_TEMPLATE
    for l, m in zip(line_tmp, memory_value):
        l_m_mix = l[:5] + m
        mix.append(template.format(*l_m_mix))
    mix[self.L_M_HEADER_INDEX] = template.format(*self.L_M_HEADER)
    mix[self.L_M_SEPARATOR_INDEX] += "=" * 27
    self.logger.debug("line, memory profiler result\n" + "\n".join(mix))

    return rtn
def profile_each_line(func, *args, **kwargs):
    profiler = LineProfiler()
    profiled_func = profiler(func)
    try:
        profiled_func(*args, **kwargs)
    finally:
        profiler.print_stats()
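A minimal usage sketch for the helper above, assuming `LineProfiler` is importable at module level; `slow_sum` is a hypothetical function introduced here only for illustration.

from line_profiler import LineProfiler

def slow_sum(n):
    # hypothetical workload: accumulate the first n integers in a plain Python loop
    total = 0
    for i in range(n):
        total += i
    return total

# Runs slow_sum once under the profiler and prints per-line timings to stdout.
profile_each_line(slow_sum, 1_000_000)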
def assert_benchmark(expected_execution_time: float, target_function,
                     profile_filename_prefix: str, commit_id: str):
    # given
    if not os.path.exists(TARGET_PROFILE_REPORT_FOLDER):
        os.makedirs(TARGET_PROFILE_REPORT_FOLDER)
    profile = LineProfiler()
    source_data = generate_data()

    # when: using default method (joblib Parallel) for parallelisation
    start_execution_time = time()
    profile_wrapper = profile(target_function)
    for each in source_data:
        profile_wrapper(each)
    actual_execution_time = time() - start_execution_time

    output_filename = f'{TARGET_PROFILE_REPORT_FOLDER}/{profile_filename_prefix}-' \
                      f'{datetime.now().strftime("%d-%m-%Y-%H-%M-%S")}-' \
                      f'{shorten_sha(git_current_head_sha())}'
    with open(f'{output_filename}.txt', 'w') as file:
        with redirect_stdout(file):
            profile.print_stats()
    profile.dump_stats(f'{output_filename}.lprof')

    # then
    assert actual_execution_time <= expected_execution_time, \
        f"Expected duration: {expected_execution_time}, Actual duration: {actual_execution_time}. " \
        f"Slow down by: {abs(actual_execution_time - expected_execution_time)} seconds. " \
        f"We have crossed the benchmark limit after a speed up via commit {commit_id}."
def decorator(self, *args, **kwargs):
    func_return = f(self, *args, **kwargs)
    lp = LineProfiler()
    lp_wrap = lp(f)
    lp_wrap(self, *args, **kwargs)
    lp.print_stats()
    return func_return
def benchmark(cls, func, *args):
    from line_profiler import LineProfiler
    prf = LineProfiler()
    prf.add_function(func)
    ret = prf.runcall(func, *args)
    prf.print_stats()
    return ret
def newfunc(*args, **kwargs):
    if line_profile and sys.gettrace() is None:
        from line_profiler import LineProfiler
        lp = LineProfiler()
        lp.timer_unit = 1e-6
        for f in profile_funcs:
            lp.add_function(f)
        lp_wrapper = lp(func)
        t = time.time()
        res = lp_wrapper(*args, **kwargs)
        t = time.time() - t
        if verbose:
            lp.print_stats()
        return res, [t]
    else:
        t_lst = []
        for i in range(100000):
            startTime = time.time()
            res = func(*args, **kwargs)
            t_lst.append(time.time() - startTime)
            if sum(t_lst) > min_time and len(t_lst) >= min_runs:
                if hasattr(func, '__name__'):
                    fn = func.__name__
                else:
                    fn = "Function"
                if verbose:
                    print('%s: %f +/-%f (%d runs)' % (fn, np.mean(t_lst), np.std(t_lst), i + 1))
                return res, t_lst
def profile_function(my_func, *args, **kwargs):
    lp = LineProfiler()
    output_val = lp(my_func)(*args, **kwargs)

    # Redirect stdout so we can grab profile output
    mystdout = StringIO()
    lp.print_stats(stream=mystdout)
    lprof_lines = mystdout.getvalue().split('\n')
    profile_start = 1 + next(idx for idx, line in enumerate(lprof_lines)
                             if '=====' in line)
    lprof_code_lines = lprof_lines[profile_start:-1]
    source_lines = inspect.getsource(my_func).split('\n')

    if len(source_lines) != len(lprof_code_lines):
        print("WARNING! Mismatch in source length and returned line profiler estimates")
        print('\n'.join(lprof_lines))
        print("---- Code ----")
        print('\n'.join(source_lines))
    else:
        print("\n".join(lprof_lines[:profile_start]))
        print("\n".join([
            "{0} \t {1}".format(l, s)
            for l, s in zip(lprof_code_lines, source_lines)
        ]))
    return output_val, source_lines, lprof_code_lines
def decorator(*args, **kwargs):
    func_return = func(*args, **kwargs)
    lp = LineProfiler()
    lp_wrap = lp(func)
    lp_wrap(*args, **kwargs)
    lp.print_stats()
    return func_return
def run_profile(im, filename='profile.txt',
                module_list=[color, exposure, feature, filters, measure,
                             morphology, restoration, segmentation, transform,
                             util],
                skip_functions=[]):
    lp = LineProfiler()
    functions = []
    for submodule in module_list:
        functions += inspect.getmembers(submodule, inspect.isfunction)

    with open(filename, 'a') as f:
        for function in functions:
            args = inspect.getargspec(function[1])
            only_one_argument = only_one_nondefault(args)
            if function[0] in skip_functions:
                continue
            if only_one_argument:
                try:
                    print(function[0])
                    lp_wrapper = lp(function[1])
                    res = lp_wrapper(im)
                    lp.print_stats(stream=f)
                except TypeError:
                    print('wrong type ', function[0])
                except:
                    print('error ', function[0])
def getNextDay():
    try:
        year = int(entryYear.get())
        month = int(entryMonth.get())
        day = int(entryDay.get())
    except ValueError as error:
        eLinter.set("Invalid date format:")
    else:
        time_start = time.time()
        lp = LineProfiler()
        lp.add_function(main.isRunNian)
        lp_wrapper = lp(checkTime)
        lp_wrapper(year, month, day)
        lp.print_stats()

        str1 = checkTime(year, month, day)
        if '' != str1:
            eLinter.set(str1)
            eYear.set('')
            eMonth.set('')
            eDay.set('')
            eLunar.set('')
            return
        newYear, newMonth, newDay = main.getNextday(year, month, day)
        # Show the result in the UI
        eYear.set(str(newYear))
        eMonth.set(str(newMonth))
        eDay.set(str(newDay))
        eLinter.set("")
        days = int(nextday.Lunar(newYear, newMonth, newDay).getDays())
        week = ["Monday", "Tuesday", "Wednesday", "Thursday",
                "Friday", "Saturday", "Sunday"]
        # print(week[(days + 1) % 7])
        eLunar.set(nextday.test(newYear, newMonth, newDay) + " " + week[(days + 1) % 7])
def test_processor_performance(self):
    # Profile only if LineProfiler present.
    # To install: conda install line_profiler
    try:
        from line_profiler import LineProfiler
    except ImportError:
        print("Please install the 'line_profiler' module first.")
        return

    # simulated image size
    width = 2560
    height = 2016

    # simulated gaussian function
    xx, yy = numpy.meshgrid(numpy.arange(width), numpy.arange(height))
    x0 = 1280  # x center
    y0 = 1300  # y center
    sx = 300   # x sigma
    sy = 150   # y sigma
    amplitude = 50

    image = amplitude * numpy.exp(-(xx - x0)**2 / (2 * sx**2) - (yy - y0)**2 / (2 * sy**2))
    noise = numpy.random.normal(scale=amplitude * 0.2, size=(height, width))
    image = (image + numpy.abs(noise)).astype(dtype="uint16")

    background_image = (numpy.random.rand(2016, 2560) * 5).astype(dtype="uint16")
    roi = [900, 1600]
    axis = numpy.linspace(8980, 9020, image.shape[1])

    parameters = {
        "background": "in_memory",
        "background_data": background_image
    }

    profile = LineProfiler(processor.process_image)
    process_image_wrapper = profile(processor.process_image)

    # Warm-up numba.
    results = processor.process_image(image, axis, "image", roi, parameters)

    n_iterations = 1000

    start_time = time()
    for i in range(n_iterations):
        process_image_wrapper(image, axis, "image", roi, parameters)
    end_time = time()

    time_difference = end_time - start_time
    rate = n_iterations / time_difference

    print("Processing rate: ", rate)
    print("total_time: ", time_difference)
    print("n_iterations: ", n_iterations)

    profile.print_stats()
from contextlib import contextmanager


@contextmanager
def line_profile(*items):
    """A context manager which prints a line-by-line profile for the given
    functions, modules, or module names while execution is in its context.

    Example:

    with line_profile(__name__, Class.some_function, some_module):
        do_something()
    """
    from line_profiler import LineProfiler
    prof = LineProfiler()
    for item in items:
        if inspect.isfunction(item):
            prof.add_function(item)
        elif inspect.ismodule(item):
            prof.add_module(item)
        elif isinstance(item, str):
            prof.add_module(sys.modules[item])
        else:
            raise TypeError('Inputs must be functions, modules, or module names')
    prof.enable()
    yield
    prof.disable()
    prof.print_stats()
def test_given_a_text_column_when_profiler_is_applied_with_high_level_analysis_then_it_finishes_quick():
    # given
    if not os.path.exists(TARGET_PROFILE_REPORT_FOLDER):
        os.makedirs(TARGET_PROFILE_REPORT_FOLDER)
    profile = LineProfiler()
    source_data = generate_data()
    expected_execution_time = 4  # benchmarked: (first-time) 46.694923639297485, (cached) 5.918392 seconds

    # when: using default method (joblib Parallel) for parallelisation
    start_execution_time = time()
    profile_wrapper = profile(grammar_check_score)
    for each in source_data:
        profile_wrapper(each)
    actual_execution_time = time() - start_execution_time

    output_filename = f'{TARGET_PROFILE_REPORT_FOLDER}/grammar_check_score-' \
                      f'{datetime.now().strftime("%d-%m-%Y-%H-%M-%S")}-' \
                      f'{shorten_sha(git_current_head_sha())}'
    with open(f'{output_filename}.txt', 'w') as file:
        with redirect_stdout(file):
            profile.print_stats()
    profile.dump_stats(f'{output_filename}.lprof')

    # then
    assert actual_execution_time <= expected_execution_time, \
        f"Expected duration: {expected_execution_time}, Actual duration: {actual_execution_time}. " \
        f"Slow down by: {abs(actual_execution_time - expected_execution_time)} seconds. " \
        f"We have crossed the benchmark limit after a speed up via commit 51a8952."
def test_process_image_performance(self):
    # Profile only if LineProfiler present.
    # To install: conda install line_profiler
    try:
        from line_profiler import LineProfiler
    except ImportError:
        return

    simulated_camera = CameraSimulation(CameraConfig("simulation"),
                                        size_x=2048, size_y=2048)
    x_axis, y_axis = simulated_camera.get_x_y_axis()
    x_size, y_size = simulated_camera.get_geometry()

    image_background_array = numpy.zeros(shape=(y_size, x_size), dtype="uint16") + 3

    parameters = {
        "image_threshold": 1,
        "image_region_of_interest": [0, 2048, 0, 2048],
        "image_good_region": {
            "threshold": 0.3,
            "gfscale": 1.8
        },
        "image_slices": {
            "number_of_slices": 5,
            "scale": 1.0,
            "orientation": "horizontal"
        }
    }

    profile = LineProfiler(process_image)
    process_image_wrapper = profile(process_image)

    n_iterations = 300

    print("Generating images.")
    images = []
    for _ in range(n_iterations):
        images.append(simulated_camera.get_image())

    print("Processing images.")
    start_time = time.time()
    for image in images:
        process_image_wrapper(image=image,
                              timestamp=time.time(),
                              x_axis=x_axis,
                              y_axis=y_axis,
                              parameters=parameters,
                              image_background_array=image_background_array)
    end_time = time.time()

    time_difference = end_time - start_time
    rate = n_iterations / time_difference

    print("Processing rate: ", rate)

    profile.print_stats()
def profiled_func(*args, **kwargs):
    try:
        profiler = LineProfiler()
        profiler.add_function(func)
        profiler.enable_by_count()
        return func(*args, **kwargs)
    finally:
        profiler.print_stats()
def wrapper(*args, **kwargs):
    lp = LineProfiler()
    deco = lp(func)
    res = deco(*args, **kwargs)
    s = StringIO()
    lp.print_stats(stream=s)
    print(s.getvalue())
    return res
def notest_03pic1(self):
    filename = '../scenes/102.png'
    # res = send_pic_file(filename)
    lp = LineProfiler()
    lp_wrapper = lp(send_pic_file)
    res = lp_wrapper(filename, True)
    lp.print_stats()
    print(res)
def printProfile(*args):
    lp = LineProfiler()
    dec_f = lp(f)
    output_value = dec_f(*args)
    print("Line Profile for:", title)
    print("----------------------")
    lp.print_stats()
    return output_value
def profile_list_serialization(serializer, child_serializer, instances_list):
    if os.environ.get('CI', None) != 'true' or not LineProfiler:
        return
    profile = LineProfiler(serializer.to_representation,
                           child_serializer.to_representation)
    profile.enable()
    serializer.to_representation(instances_list)
    profile.disable()
    profile.print_stats()
def DataReader_bin_test2(self):
    self.select_file(num=1)
    prf = LineProfiler()
    prf.add_function(self.read_bin_file_to_tx2)
    prf.runcall(self.read_bin_file_to_tx2, start=3 * 10**7, datapoints=10**6)
    prf.print_stats()
    print(len(self.x), math.log10(len(self.x)))
    self.plot_timecorse_of_move(show_it=1)
def wrap(*args, **kwargs):
    profile = LineProfiler()
    profile.add_function(f)
    profile.enable_by_count()
    result = f(*args, **kwargs)
    profile.disable_by_count()
    profile.print_stats(sys.stdout)
    return result
def profile_list_deserialization(serializer, child_serializer, data_list):
    if os.environ.get('CI', None) != 'true' or not LineProfiler:
        return
    profile = LineProfiler(serializer.to_internal_value,
                           child_serializer.to_internal_value)
    profile.enable()
    serializer.to_internal_value(data_list)
    profile.disable()
    profile.print_stats()
def profiled_func(*args, **kwargs):
    try:
        lp = LineProfiler()
        lp.add_function(f)
        lp.enable_by_count()
        return f(*args, **kwargs)
    finally:
        lp.print_stats()
def _decorator(*args, **kwargs):
    func_return = f(*args, **kwargs)
    if not LINE_PROFILER_ENABLE:
        return func_return
    lp = LineProfiler()
    lp_wrap = lp(f)
    lp_wrap(*args, **kwargs)
    lp.print_stats()
    return func_return
def profiled_func(*args, **kwargs):
    line_profiler = LineProfiler()
    line_profiler.add_function(func)
    # Use an explicit loop rather than a lazy map() so the followed
    # functions are actually registered with the profiler.
    for followed in self.follow:
        line_profiler.add_function(followed)
    line_profiler.enable_by_count()
    result = func(*args, **kwargs)
    line_profiler.disable_by_count()
    line_profiler.print_stats(stripzeros=True)
    return result
def profile_each_line(func, *args, **kwargs):
    profiler = LineProfiler()
    profiled_func = profiler(func)
    retval = None
    try:
        retval = profiled_func(*args, **kwargs)
    finally:
        profiler.print_stats()
    return retval
def predict_time_profile(self, img):
    # run multiple times
    for i in range(8):
        print("*********** {} profile time *************".format(i))
        lp = LineProfiler()
        lp_wrapper = lp(self.do_predict)
        ret = lp_wrapper(img)
        lp.print_stats()
    return ret
def profile_lines(self):
    """
    A simple wrapper to call the line_profiler.
    Prints the line_profiler output.
    """
    lp = LineProfiler()
    lp_wrapper = lp(self.function)
    lp_wrapper(**self.params)
    lp.print_stats()
def profiled_func(*args, **kwargs):
    try:
        profiler = LineProfiler()
        profiler.add_function(func)
        for f in follow:
            profiler.add_function(getattr(args[0], f))
        profiler.enable_by_count()
        return func(*args, **kwargs)
    finally:
        profiler.print_stats()
def wrapped_fn(*args, **kwargs):
    try:
        profiler = LineProfiler()
        profiler.add_function(fn)
        for f in follow:
            profiler.add_function(f)
        profiler.enable_by_count()
        return fn(*args, **kwargs)
    finally:
        profiler.print_stats()
def profiled_func(*args, **kwargs):
    try:
        pf = LineProfiler()
        pf.add_function(func)
        for f in follow:
            pf.add_function(f)
        pf.enable_by_count()
        return func(*args, **kwargs)
    finally:
        pf.print_stats()
def timetest(func, *para):
    p = LineProfiler()
    p.add_function(func)
    p.enable_by_count()
    p_wrapper = p(func)
    p_wrapper(*para)

    # Printing
    print(func(*para))
    p.print_stats()
def profiled_func(*args, **kwargs):
    try:
        profiler = LineProfiler()
        profiler.add_function(func)
        for f in follow:
            if isinstance(f, basestring):
                f = to_function(f)
            profiler.add_function(f)
        profiler.enable_by_count()
        return func(*args, **kwargs)
    finally:
        profiler.print_stats()
def speedtest_validate_transaction():
    # create a transaction
    b = bigchaindb.Bigchain()
    tx = b.create_transaction(b.me, b.me, None, 'CREATE')
    tx_signed = b.sign_transaction(tx, b.me_private)

    # setup the profiler
    profiler = LineProfiler()
    profiler.enable_by_count()
    profiler.add_function(bigchaindb.Bigchain.validate_transaction)

    # validate_transaction 1000 times
    for i in range(1000):
        b.validate_transaction(tx_signed)

    profiler.print_stats()
def main():
    profiler = cProfile.Profile()
    profiler.enable()
    function_runner('original_method')
    function_runner('step_one')
    function_runner('step_two')
    function_runner('step_three')
    function_runner('step_four')
    function_runner('step_five')
    function_runner('step_six')
    function_runner('step_seven')
    function_runner('step_eight')
    function_runner('step_nine')
    function_runner('current')
    profiler.disable()
    profiler.dump_stats('function_event.stats')

    line_profiler = LineProfiler(CurrentFunctionContainer().current)
    line_profiler.enable()
    function_runner('current')
    line_profiler.disable()
    line_profiler.dump_stats('function_event.line_stats')
    line_profiler.print_stats()

    print 'Original', timeit.timeit(
        lambda: function_runner('original_method'), number=7)
    print 'One', timeit.timeit(
        lambda: function_runner('step_one'), number=7)
    print 'Two', timeit.timeit(
        lambda: function_runner('step_two'), number=7)
    print 'Three', timeit.timeit(
        lambda: function_runner('step_three'), number=7)
    print 'Four', timeit.timeit(
        lambda: function_runner('step_four'), number=7)
    print 'Five', timeit.timeit(
        lambda: function_runner('step_five'), number=7)
    print 'Six', timeit.timeit(
        lambda: function_runner('step_six'), number=7)
    print 'Seven', timeit.timeit(
        lambda: function_runner('step_seven'), number=7)
    print 'Eight', timeit.timeit(
        lambda: function_runner('step_eight'), number=7)
    print 'Nine', timeit.timeit(
        lambda: function_runner('step_nine'), number=7)
    print 'Current', timeit.timeit(
        lambda: function_runner('current'), number=7)
def profile(algo, data=None, to_profile=[]):
    """ Profile algorithm using line_profiler.
    :param algo: Algorithm instance.
    :param data: Stock prices, default is random portfolio.
    :param to_profile: List of methods to profile, default is `step` method.

    Example of use:
        tools.profile(Anticor(window=30, c_version=False), to_profile=[Anticor.weights])
    """
    from line_profiler import LineProfiler

    if data is None:
        data = random_portfolio(n=1000, k=10, mu=0.)

    to_profile = to_profile or [algo.step]
    profile = LineProfiler(*to_profile)
    profile.runcall(algo.run, data)
    profile.print_stats()
class Profiler(object):
    def __init__(self, *args):
        self.profile = LineProfiler()
        if len(args) > 0:
            for func in args:
                if callable(func):
                    self.add_function(func)

    def add_function(self, func):
        self.profile.add_function(func)

    def __enter__(self):
        self.profile.enable_by_count()

    def __exit__(self, type, value, traceback):
        self.profile.disable_by_count()
        self.profile.print_stats()
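A minimal usage sketch for the context manager above; `parse_rows` is a hypothetical function used only for illustration, and `LineProfiler` is assumed to be imported where the class is defined.

from line_profiler import LineProfiler

def parse_rows(rows):
    # hypothetical workload: split comma-separated records
    return [row.split(',') for row in rows]

# Register parse_rows with the profiler, then run it inside the context;
# line-by-line stats are printed when the block exits.
with Profiler(parse_rows):
    parse_rows(['a,b,c'] * 10_000)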
def run_profiling(args):
    lprofiler = LineProfiler()

    monitor_functions = [api.problem.submit_key, api.problem.get_unlocked_pids,
                         api.problem.get_solved_pids, api.problem.get_all_problems,
                         api.problem.get_solved_problems, api.stats.get_score,
                         api.cache.memoize, api.autogen.grade_problem_instance,
                         api.autogen.get_problem_instance,
                         api.autogen.get_number_of_instances]
    for func in monitor_functions:
        lprofiler.add_function(func)

    lprofiler.enable()

    if args.stack:
        profiler = Profiler(use_signal=False)
        profiler.start()

    for func, a, kw in operations:
        func(*a, **kw)

    if args.stack:
        profiler.stop()

    lprofiler.disable()

    if args.print:
        print(profiler.output_text(unicode=True, color=True))
        lprofiler.print_stats()

    output = open(args.output, "w")

    if args.stack:
        output.write(profiler.output_text(unicode=True))

        if args.output_html is not None:
            output_html = open(args.output_html, "w")
            output_html.write(profiler.output_html())
            output_html.close()
            print("Wrote test info to " + args.output_html)

    lprofiler.print_stats(output)
    output.close()
    print("Wrote test info to " + args.output)
        self._add_msg(m)

        return m


if __name__ == "__main__":
    import sys
    use_profiler = False
    if use_profiler:
        from line_profiler import LineProfiler
        profiler = LineProfiler()
        profiler.add_function(DFReader_binary._parse_next)
        profiler.add_function(DFReader_binary._add_msg)
        profiler.add_function(DFReader._set_time)
        profiler.enable_by_count()

    filename = sys.argv[1]
    if filename.endswith('.log'):
        log = DFReader_text(filename)
    else:
        log = DFReader_binary(filename)
    while True:
        m = log.recv_msg()
        if m is None:
            break
        # print(m)
    if use_profiler:
        profiler.print_stats()
parser.add_argument('--plot', action='store_true', help='Upload time plot to plotly')

args = parser.parse_args()

if args.time:
    xxx = msg_mass()
    test_full_client(xxx)

if args.cprof:
    import cProfile
    xxx = msg_mass()
    cProfile.run("test_full_client(xxx)", sort="tottime")

if args.lprof:
    from line_profiler import LineProfiler
    # import rscoin.rscservice

    profile = LineProfiler(rscoin.rscservice.RSCProtocol.handle_Query,
                           rscoin.rscservice.RSCFactory.process_TxQuery,
                           rscoin.Tx.check_transaction,
                           rscoin.Tx.check_transaction_utxo,
                           rscoin.Tx.parse)

    xxx = msg_mass()
    profile.run("test_full_client(xxx)")
    profile.print_stats()
def __init__(self, proj, geo, angles, niter, **kwargs):
    lp = LineProfiler()
    lp_wrapper = lp(super(lineprofileroveride, self).__init__)
    lp_wrapper(proj, geo, angles, niter, **kwargs)
    lp.print_stats()
class SpecialTestRunner(SpecialTest):
    """
    Test runner, calls the specified test under the specified profiler.
    Mode = None - no profiler, "c" - cProfile, "l" - LineProfiler, "h" - hotshot
    """

    def __init__(self, test, mode=None):
        super(SpecialTestRunner, self).__init__()

        self.mode = mode
        self.test = test
        self.profiler = None

    def setup(self):
        if self.mode == 'c':
            import cProfile
            self.profiler = cProfile.Profile()
        elif self.mode == 'l':
            from line_profiler import LineProfiler
            self.profiler = LineProfiler()
        elif self.mode == 'h':
            import hotshot
            self.info['name'] = 'special.prof'
            self.profiler = hotshot.Profile(self.info['name'])

        self.test.setup()

    def run(self):
        if self.mode == 'c':
            self.profiler.enable()
        elif self.mode == 'l':
            self.profiler.enable_by_count()
            self.profiler.add_function(Handler.handle)
            self.profiler.add_function(Condition.check_string_match)
            self.profiler.add_function(Condition.check_function)
            self.profiler.add_function(Condition.check_list)

        t = Timer()

        # Run the test itself
        if self.mode == 'h':
            self.profiler.runcall(self.test.run)
        else:
            self.test.run()

        print('Test time: %s' % t.delta())

        if self.mode == 'c':
            import pstats
            import StringIO

            self.profiler.disable()
            sio = StringIO.StringIO()
            ps = pstats.Stats(self.profiler, stream=sio).sort_stats('time')
            ps.print_stats()
            print(sio.getvalue())

        elif self.mode == 'h':
            import hotshot.stats

            print('Processing results...')
            self.profiler.close()
            name = self.info['name']
            stats = hotshot.stats.load(name)
            stats.strip_dirs()
            stats.sort_stats('time', 'calls')
            stats.print_stats(50)

            print('Run "hotshot2calltree -o %s.out %s" to generate the cachegrind file' % (name, name))

        elif self.mode == 'l':
            self.profiler.disable()
            self.profiler.print_stats()
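A brief usage sketch for the runner above; `MyTest` stands in for a hypothetical SpecialTest subclass and is not part of the original code.

# Run a test under the line profiler (mode='l'); setup() creates the profiler
# and run() executes the test and prints the per-line statistics.
runner = SpecialTestRunner(MyTest(), mode='l')
runner.setup()
runner.run()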
    def _transform_STRIKED(self, match: Match) -> str:
        if not match['STRIKED_TEXT']:
            return ''
        return f"<s>{self._parse(match['STRIKED_TEXT'], 'STRIKED')}</s>"

    def _transform_SUPERSCRIPT(self, match: Match) -> str:
        if not match['SUPERSCRIPT_TEXT']:
            return ''
        return f"<sup>{self._parse(match['SUPERSCRIPT_TEXT'], 'SUPERSCRIPT')}</sup>"

    def _transform_SUBSCRIPT(self, match: Match) -> str:
        if not match['SUBSCRIPT_TEXT']:
            return ''
        return f"<sub>{self._parse(match['SUBSCRIPT_TEXT'], 'SUBSCRIPT')}</sub>"

    def _transform_HORIZ_RULE(self, match: Match) -> str:
        return '<hr />'

    def _post_BACKSLASH_UNESCAPE(self, text: str) -> str:
        return self._backslash_escape_re.sub(r'\1', text)


d = DefaultRenderer()

if __name__ == '__main__':
    from line_profiler import LineProfiler
    lp = LineProfiler()
    lp.add_function(d._parse)
    lp.runcall(d.parse, '*****' * 2000)
    lp.print_stats()
    print(d.parse('**__hi__**'))