def profile(f, *args, **kwds): """XXX docstring""" global _g_threadmap p = Profiler() p.enable(subcalls=True) threading.setprofile(_thread_profile) # Note: The except clause is needed below so that profiling data still # gets dumped even when exceptions are encountered. The except clause code # is taken straight from run_bzr_catch_errrors() in commands.py and ought # to be kept in sync with it. try: try: ret = f(*args, **kwds) except (KeyboardInterrupt, Exception), e: import bzrlib.trace bzrlib.trace.report_exception(sys.exc_info(), sys.stderr) ret = 3 finally: p.disable() for pp in _g_threadmap.values(): pp.disable() threading.setprofile(None) threads = {} for tid, pp in _g_threadmap.items(): threads[tid] = Stats(pp.getstats(), {}) _g_threadmap = {} return ret, Stats(p.getstats(), threads)
def start(self): """Start profiling. This hooks into threading and will record all calls made until stop() is called. """ self._g_threadmap = {} self.p = Profiler() permitted = self.__class__.profiler_lock.acquire( self.__class__.profiler_block) if not permitted: raise errors.InternalBzrError(msg="Already profiling something") try: self.p.enable(subcalls=True) threading.setprofile(self._thread_profile) except: self.__class__.profiler_lock.release() raise
def profile(f, *args, **kwds): """XXX docstring""" p = Profiler() p.enable(subcalls=True, builtins=True) try: f(*args, **kwds) finally: p.disable() return Stats(p.getstats())
def profile(f, *args, **kwds): """FIXME: docstring""" global _g_threadmap p = Profiler() p.enable(subcalls=True) threading.setprofile(_thread_profile) try: ret = f(*args, **kwds) finally: p.disable() for pp in _g_threadmap.values(): pp.disable() threading.setprofile(None) threads = {} for tid, pp in _g_threadmap.items(): threads[tid] = Stats(pp.getstats(), {}) _g_threadmap = {} return ret, Stats(p.getstats(), threads)
class BzrProfiler(object):
    """Bzr utility wrapper around Profiler.

    For most uses the module level 'profile()' function will be suitable.
    However profiling when a simple wrapped function isn't available may
    be easier to accomplish using this class.

    To use it, create a BzrProfiler and call start() on it. Some arbitrary
    time later call stop() to stop profiling and retrieve the statistics
    from the code executed in the interim.

    Note that profiling involves a threading.Lock around the actual
    profiling. This is needed because profiling involves global manipulation
    of the python interpreter state. As such you cannot perform multiple
    profiles at once. Trying to do so will lock out the second profiler
    unless the global bzrlib.lsprof.BzrProfiler.profiler_block is set to 0.
    Setting it to 0 will cause profiling to fail rather than blocking.
    """

    profiler_block = 1
    """Serialise rather than failing to profile concurrent profile requests."""

    profiler_lock = threading.Lock()
    """Global lock used to serialise profiles."""

    def start(self):
        """Start profiling.

        This hooks into threading and will record all calls made until
        stop() is called.
        """
        self._g_threadmap = {}
        self.p = Profiler()
        permitted = self.__class__.profiler_lock.acquire(
            self.__class__.profiler_block)
        if not permitted:
            raise errors.InternalBzrError(msg="Already profiling something")
        try:
            self.p.enable(subcalls=True)
            threading.setprofile(self._thread_profile)
        except:
            self.__class__.profiler_lock.release()
            raise

    def stop(self):
        """Stop profiling.

        This unhooks from threading and cleans up the profiler, returning
        the gathered Stats object.

        :return: A bzrlib.lsprof.Stats object.
        """
        try:
            self.p.disable()
            for pp in self._g_threadmap.values():
                pp.disable()
            threading.setprofile(None)
            p = self.p
            self.p = None
            threads = {}
            for tid, pp in self._g_threadmap.items():
                threads[tid] = Stats(pp.getstats(), {})
            self._g_threadmap = None
            return Stats(p.getstats(), threads)
        finally:
            self.__class__.profiler_lock.release()

    def _thread_profile(self, f, *args, **kwds):
        # we lose the first profile point for a new thread in order to
        # trampoline a new Profile object into place
        thr = thread.get_ident()
        self._g_threadmap[thr] = p = Profiler()
        # this overrides our sys.setprofile hook:
        p.enable(subcalls=True, builtins=True)
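
# Usage sketch (illustrative, not part of the original module). BzrProfiler
# is driven by calling start(), running the code of interest, then stop(),
# which returns a bzrlib.lsprof.Stats object and releases the global
# profiler_lock. `_sample_workload` and `_example_bzrprofiler_usage` are
# hypothetical names for this sketch.

def _sample_workload():
    # placeholder work so there is something to measure
    return sorted(range(10000), reverse=True)

def _example_bzrprofiler_usage():
    profiler = BzrProfiler()
    profiler.start()
    try:
        _sample_workload()
    finally:
        # stop() also releases the global profiler_lock acquired by start()
        stats = profiler.stop()
    return stats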
def _profile_it(func, func_positional_arguments, func_keyword_arguments,
                output_max_slashes_fileinfo, profileit__repeat):
    """Profiles ``func`` and returns the profile result as a (summary_dict, table) tuple.

    .. note:: excludes a couple of functions/methods that are not relevant
       to the result:

       - excludes: profiler.enable()
       - excludes: profiler.disable()
       - excludes: cProfile.Profile.runcall()

    :param func: (callable) the function to profile
    :param func_positional_arguments: (list) positional arguments for the function
    :param func_keyword_arguments: (dict) any keyword arguments for the function
    :param output_max_slashes_fileinfo: (int) maximum number of path levels kept in the profile file info
    :param profileit__repeat: (int) how many times the function is run; the results are summed over all runs, similar to the code below

    .. code-block:: python3

        for repeat in range(profileit__repeat):
            profiler.enable()
            profiler.runcall(func, *func_positional_arguments, **func_keyword_arguments)
            profiler.disable()

    :return: (tuple) format: (summary_dict, table); table is a list of dictionaries (one profile result line per profiled function)
    :raise Err:
    """
    profiler = Profiler()
    for repeat in range(profileit__repeat):
        profiler.enable()
        func(*func_positional_arguments, **func_keyword_arguments)
        profiler.disable()

    total_calls = 0
    primitive_calls = 0
    total_time = 0
    table = []

    for func_tmp, (pcalls, ncalls, tottime, cumtime, callers) in create_stats(profiler).items():
        temp_dict = {
            'number_of_calls': '{:,}'.format(pcalls) if pcalls == ncalls else '{:,}/{:,}'.format(pcalls, ncalls),
            'func_time': tottime,
            'func_cumulative_time': cumtime
        }
        if func_tmp[0] == '~':
            # exclude the profiler.enable()/disable() functions
            if '_lsprof.Profiler' in func_tmp[2]:
                continue
            else:
                temp_func_txt = func_tmp[2]
        else:
            # adjust path levels
            temp_path_file_ect = func_tmp[0]
            temp_slashes = temp_path_file_ect.count('/')
            if temp_slashes > output_max_slashes_fileinfo:
                temp_func_txt = '{}:{}({})'.format(
                    temp_path_file_ect.split('/', temp_slashes - output_max_slashes_fileinfo)[-1],
                    func_tmp[1],
                    func_tmp[2]
                )
            else:
                temp_func_txt = '{}:{}({})'.format(temp_path_file_ect, func_tmp[1], func_tmp[2])

        if temp_func_txt[0] == '<' and temp_func_txt[-1] == '>':
            temp_dict['func_txt'] = temp_func_txt[1:-1]
        elif temp_func_txt[0] == '<':
            temp_dict['func_txt'] = temp_func_txt[1:]
        elif temp_func_txt[-1] == '>':
            temp_dict['func_txt'] = temp_func_txt[:-1]
        else:
            temp_dict['func_txt'] = temp_func_txt

        table.append(temp_dict)
        total_calls += ncalls
        primitive_calls += pcalls
        total_time += tottime
        if ("jprofile", 0, "profiler") in callers:
            raise Err('_profile_it', ['ERROR NOT SURE WHAT To DO HERE: SEE pstate.py: get_top_level_stats()', func])

    summary_dict = {
        'total_calls': total_calls,
        'primitive_calls': primitive_calls,
        'total_time': total_time
    }
    return summary_dict, table
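
# Usage sketch (illustrative, not part of the original module). _profile_it()
# takes the callable, its positional arguments as a list, its keyword
# arguments as a dict, the maximum number of path components to keep in the
# file info, and a repeat count. `_scaled_range` and
# `_example_profile_it_usage` are hypothetical names for this sketch.

def _scaled_range(n, factor=2):
    # placeholder workload so there is something to measure
    return [i * factor for i in range(n)]

def _example_profile_it_usage():
    summary_dict, table = _profile_it(
        func=_scaled_range,
        func_positional_arguments=[50000],
        func_keyword_arguments={'factor': 3},
        output_max_slashes_fileinfo=2,
        profileit__repeat=1)
    # summary_dict holds 'total_calls', 'primitive_calls' and 'total_time';
    # each row in table holds 'number_of_calls', 'func_time',
    # 'func_cumulative_time' and 'func_txt'.
    return summary_dict, table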