def get_stats(self, session):
    output = StringIO()
    stats = None
    temp_files = []
    try:
        for profile in session.profiles.all():
            if profile.dump.path:
                log.debug('Adding local profile dump')
                path = profile.dump.path
            else:
                log.debug('Creating a temporary file for remote profile dump')
                temp, path = mkstemp(dir=self.tempdir)
                temp = fdopen(temp, 'wb')  # mkstemp returns a raw fd; wrap it in a writable file object
                temp_files.append((temp, path))
                log.debug('Copying content from remote dump to tempfile')
                temp.write(profile.dump.read())
                temp.flush()  # make sure Stats reads the complete dump from disk
                log.debug('Adding tempfile profile dump')
            if stats is None:
                log.debug('Creating a Stats object')
                stats = Stats(path, stream=output)
            else:
                log.debug('Appending to existing Stats object')
                stats.add(path)
    finally:
        for temp, path in temp_files:
            log.debug('Removing temporary file at %s', path)
            temp.close()
            unlink(path)
    return stats, output

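# A minimal, self-contained sketch (Python 2, assumed names) of the
# Stats(stream=...) capture pattern the method above relies on: print_stats()
# writes its report to the supplied stream instead of stdout, so the formatted
# text can be read back out of a StringIO buffer. The workload function and
# the dump filename are made up for illustration.
from StringIO import StringIO  # io.StringIO on Python 3
from cProfile import Profile
from pstats import Stats


def example_workload():
    return sum(i * i for i in xrange(100000))


profile = Profile()
profile.runcall(example_workload)
profile.dump_stats('example.dump')  # hypothetical dump file

output = StringIO()
stats = Stats('example.dump', stream=output)  # report text goes into the buffer
stats.sort_stats('cumulative').print_stats()
report_text = output.getvalue()
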
def __init__(self):
    settings_manager = SettingsManager()  # Set up the settings manager
    max_workers = settings_manager.getint('application', 'max-workers')  # Maximum number of worker threads
    profiler_on = settings_manager.getint('debugging', 'profiler-on')  # Whether the profiler is enabled
    absolute = settings_manager.getint('save', 'absolute')  # Whether the save path is absolute
    save_path = settings_manager.get('save', 'path')  # The save path itself
    if not absolute:
        save_path = PROJECT_PATH + os.path.sep + save_path

    executor = ThreadPoolExecutor(max_workers=max_workers, profiler_on=profiler_on)  # Set up the thread executor
    dis = Disassembler(settings_manager)  # Build the disassembler
    server = PyDAServer('0.0.0.0', 9000)  # Set up the PyDA server
    save_manager = SaveManager(save_path)

    if profiler_on:
        profile = Profile()
        profile.enable()

    app.build_and_run(settings_manager, dis, executor, server, save_manager)  # Run the interface

    if profiler_on:
        profile.disable()
        stats = executor.getProfileStats()  # Per-worker stats gathered by the executor
        if stats is None:
            stats = Stats(profile)
        else:
            stats.add(profile)
        with open('profile.stats', 'wb') as statsfile:
            stats.stream = statsfile  # Write the text report to the file instead of stdout
            stats.sort_stats('cumulative').print_stats()

import os
from glob import glob
from pstats import Stats


def concat(pattern, outfile, mpi=None):
    # When running under MPI, make the pattern and output file rank-specific.
    if mpi:
        from mpi4py import MPI
        pattern = pattern % MPI.COMM_WORLD.rank
        outfile = outfile % MPI.COMM_WORLD.rank
    files = glob(pattern)
    if files:
        # Merge all matching profile dumps into a single Stats object,
        # write the combined dump, then remove the per-process files.
        s = Stats(files[0])
        for f in files[1:]:
            s.add(f)
        s.dump_stats(outfile)
        for f in files:
            os.remove(f)

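# A hypothetical usage sketch for concat() above (non-MPI path): a few worker
# runs each dump a profile named worker_<n>.prof, then the glob pattern merges
# them into a single combined.prof and deletes the per-run files. Filenames
# and the profiled expression are made up.
import cProfile

for n in range(3):
    cProfile.run('sum(i * i for i in range(200000))', 'worker_%d.prof' % n)

concat('worker_*.prof', 'combined.prof')
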
def render_snakeviz(name, sessions):
    import snakeviz
    from snakeviz.stats import json_stats, table_rows

    # Load snakeviz's own viz.html template from the installed package.
    SNAKEVIZ_PATH = os.path.join(os.path.dirname(snakeviz.__file__), 'templates', 'viz.html')
    with open(SNAKEVIZ_PATH) as f:
        SNAKEVIZ_TEMPLATE = Template(f.read())

    # Merge all sessions into a single pstats.Stats object.
    pstats = Stats(sessions[0])
    for session in sessions[1:]:
        pstats.add(session)

    rendered = SNAKEVIZ_TEMPLATE.generate(
        profile_name=name,
        table_rows=table_rows(pstats),
        callees=json_stats(pstats)
    ).decode('utf-8').replace('/static/', '/snakeviz/static/')
    return escape(rendered), "background-color: white;"

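# A hypothetical call to render_snakeviz() above: 'sessions' can be anything
# pstats.Stats accepts, such as dump files written by cProfile. The profile
# name and file paths are illustrative; Template and escape are assumed to be
# imported elsewhere (e.g. tornado.template.Template and an HTML-escaping
# helper).
html, css = render_snakeviz('api-benchmark', ['run1.prof', 'run2.prof'])
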
def process_view(self, request, view_func, view_args, view_kwargs):
    from cProfile import Profile
    from pstats import Stats

    full_name = "{v.__module__}.{v.func_name}".format(v=view_func)
    if self.regex.match(full_name):
        profile = Profile()
        response = profile.runcall(view_func, request, *view_args, **view_kwargs)
        stats = Stats(profile)
        if os.path.exists(self.filename):
            stats.add(self.filename)
        stats.strip_dirs()
        stats.dump_stats(self.filename)
        return response

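# The middleware method above assumes self.regex and self.filename were set up
# elsewhere. A plausible, purely hypothetical constructor is sketched below:
# it compiles the view-name filter and chooses a dump location from Django
# settings; the setting names PROFILE_VIEW_REGEX and PROFILE_DUMP_FILE are
# invented for illustration.
import os
import re

from django.conf import settings


class ProfileViewMiddleware(object):  # hypothetical home of process_view above
    def __init__(self):
        self.regex = re.compile(getattr(settings, 'PROFILE_VIEW_REGEX', r'.*'))
        self.filename = getattr(settings, 'PROFILE_DUMP_FILE',
                                os.path.join('/tmp', 'view-profile.stats'))
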
class Profiler(object):

    def __init__(self, name=None):
        self._lock = RLock()
        self._name = name or settings.PROFILE_DEFAULT_NAME
        self._location = settings.PROFILE_FILENAME_TEMPLATE % self._name
        self._stats = None
        self._local = ProfilerLocal()
        self._notch = time() - 1
        self._updates = False

    def _set_status(self, value):
        settings.PROFILING = value

    status = property(lambda self: settings.PROFILING, _set_status)

    def __enter__(self):
        if not settings.PROFILING:
            return self
        if not self._local.counter:
            self._local.profile = Profile()
            self._local.profile.enable()
        self._local.counter += 1
        return self

    def __exit__(self, extype, exvalue, extraceback):
        if not settings.PROFILING:
            return
        self._local.counter -= 1
        if not self._local.counter:
            self._local.profile.disable()
            self.aggregate(self._local.profile)
            self._local.profile = None

    def aggregate(self, profile_or_stats):
        with self._lock:
            if self._stats is None:
                self._stats = Stats(profile_or_stats)
            else:
                self._stats.add(profile_or_stats)
            self._updates = True

    def clear(self):
        with self._lock:
            if self._stats is not None:
                self._stats = None

    @contextmanager
    def hold(self, location=None):
        if not settings.PROFILING:
            yield None
        else:
            if location is None:
                location = self._location
            with self._lock:
                self.save(location=location, force=True)
                yield location

    def load(self, location=None):
        if not settings.PROFILING:
            return None
        if location is None:
            location = self._location
        with self._lock:
            self.save(location=location, force=True)
            try:
                with open(location, "rb") as file:
                    return file.read()
            except Exception as error:
                if isinstance(error, IOError) and error.errno == errno.ENOENT:
                    return None
                else:
                    raise

    def save(self, location=None, force=False):
        if not (settings.PROFILING and self._updates):
            return
        now = time()
        if not force and now < self._notch:
            return
        if location is None:
            location = self._location
        with self._lock:
            if self._stats:
                self._stats.dump_stats(location)
            if not force:
                server_log.write("Save profiling statistics to \"%s\"" % os.path.basename(location))
            self._notch = now + settings.PROFILING_SAVE_PERIODICITY
            self._updates = False

    def autosave(self):
        self.save()
        for profiler in self._profilers.itervalues():
            profiler.save()

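# A hedged usage sketch for the Profiler class above. It assumes a settings
# module with PROFILING turned on plus the PROFILE_DEFAULT_NAME,
# PROFILE_FILENAME_TEMPLATE and PROFILING_SAVE_PERIODICITY values the class
# reads, and a ProfilerLocal providing per-thread 'counter' and 'profile'
# attributes. The profiled function is made up.
profiler = Profiler('request-handling')


def handle_request():
    return sorted(xrange(100000), reverse=True)


with profiler:             # first entry on this thread enables a cProfile.Profile
    handle_request()       # nested 'with profiler:' blocks are reference-counted
profiler.save(force=True)  # dump the aggregated pstats.Stats to the profile file
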
#!/usr/bin/python
# This file is part of TRS (http://math.kompiler.org)
#
# TRS is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# TRS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with TRS. If not, see <http://www.gnu.org/licenses/>.
import sys

from pstats import Stats

s = Stats(sys.argv[1])
for profile in sys.argv[2:]:
    s.add(profile)

s.sort_stats('cumulative')
s.print_stats()

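# A hypothetical way to produce input for the merge script above: dump one
# marshalled .profile file per run with cProfile, then invoke the script as,
# say, "python merge_stats.py run1.profile run2.profile" (the script name and
# the file names are made up).
import cProfile

cProfile.run('sum(i * i for i in range(200000))', 'run1.profile')
cProfile.run('sorted(range(200000), reverse=True)', 'run2.profile')
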
class ThreadPoolExecutor:
    '''
    A minimal stand-in for concurrent.futures.ThreadPoolExecutor, which is not
    available in the Python 2.7 standard library. It was written to avoid an
    extra dependency and mimics the real concurrent.futures.ThreadPoolExecutor.
    '''

    def __init__(self, max_workers=8, profiler_on=0):
        self.profiler_on = profiler_on
        if self.profiler_on:
            self.stats = None
        self.function_queue = Queue()
        self.activate_worker = Condition()
        self.shut_down = False
        self.workers = [Thread(target=self.worker) for i in xrange(max_workers)]
        for thread in self.workers:
            thread.start()

    def worker(self):
        profile = None
        if self.profiler_on:
            profile = Profile()
        self.activate_worker.acquire()
        try:
            while not self.shut_down:
                while not self.function_queue.empty():
                    self.do_work(self.function_queue.get(False), profile)
                if self.shut_down:
                    raise ShutdownException
                self.activate_worker.wait()
        except ShutdownException:
            pass
        self.activate_worker.release()

    def do_work(self, args, profile):
        fn, args, kwargs = args
        if profile:
            profile.enable()
        fn(*args, **kwargs)
        if profile:
            profile.disable()
            if self.stats is None:
                self.stats = Stats(profile)
            else:
                self.stats.add(profile)

    def submit(self, fn, *args, **kwargs):
        self.activate_worker.acquire()
        self.function_queue.put((fn, args, kwargs))
        self.activate_worker.notify()
        self.activate_worker.release()

    def map(self, func, iterables, timeout=None):
        for data in iterables:
            self.function_queue.put((func, data, {}))
        self.activate_worker.acquire()
        self.activate_worker.notifyAll()
        self.activate_worker.release()

    def shutdown(self, wait=True):
        self.shut_down = True
        self.activate_worker.acquire()
        self.activate_worker.notifyAll()
        self.activate_worker.release()
        if wait:
            for worker in self.workers:
                worker.join()

    def getProfileStats(self):
        return self.stats

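# A hedged usage sketch for the executor above, assuming its module-level
# dependencies (Queue, Condition, Thread, cProfile.Profile, pstats.Stats and
# a ShutdownException class) are imported elsewhere. Tasks are submitted with
# profiling enabled and the merged per-worker statistics are printed after the
# pool shuts down. The workload function is made up.
def crunch(n):
    return sum(i * i for i in xrange(n))


executor = ThreadPoolExecutor(max_workers=4, profiler_on=1)
for n in xrange(1000, 5000, 1000):
    executor.submit(crunch, n)
executor.shutdown(wait=True)

stats = executor.getProfileStats()
if stats is not None:
    stats.sort_stats('cumulative').print_stats()
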