def profile():
    """Profile ``test`` several times and keep the stats of the fastest run.

    Runs ``test`` under cProfile ``num_attempts`` times, dumps the stats of
    the quickest attempt to a timestamped ``.prof`` file and prints a short
    summary with the best duration.
    """
    num_attempts = 5
    attempts = []
    for _ in range(num_attempts):
        profiler = Profile()
        start = time.time()
        profiler.runcall(test)
        duration = time.time() - start
        attempts.append((duration, profiler))
    # Sort on duration only: a plain tuple sort would compare the Profile
    # objects on a duration tie, which raises TypeError on Python 3.
    attempts.sort(key=lambda attempt: attempt[0])
    best_duration, best_profiler = attempts[0]
    profile_file = "profile.{}.prof".format(
        datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    best_profiler.dump_stats(profile_file)
    print("{}s (best of {}), profile saved as: {}".format(
        best_duration, num_attempts, profile_file))
def profiled(*args, **kargs):
    """Run ``func`` under cProfile and dump its stats to /tmp.

    The stats file name encodes the homeserver hostname, the wrapped
    function's name and the current thread id.
    """
    prof = Profile()
    prof.enable()
    func(*args, **kargs)
    prof.disable()
    ident = current_thread().ident
    prof.dump_stats("/tmp/%s.%s.%i.pstat" % (hs.hostname, func.__name__, ident))
def main():
    """Profiling main function."""
    # Profile.runctx returns the profiler itself after "run_app()" finishes.
    profiler = Profile().runctx("run_app()", globals(), locals())
    iostream = StringIO()
    stats = Stats(profiler, stream=iostream)
    stats.sort_stats("time")  # or cumulative
    stats.print_stats(80)  # 80 == how many to print
    # optional:
    # stats.print_callees()
    # stats.print_callers()
    logging.info("Profile data:\n%s", iostream.getvalue())
def __init__(self, monitoring_dir: str, with_runtime: bool = False,
             with_profiler: bool = False):
    """Set up the monitoring samplers.

    Args:
        monitoring_dir: directory where monitoring artifacts are written.
        with_runtime: when True, also start the runtime sampler.
        with_profiler: when True, create and enable a cProfile profiler
            immediately.
    """
    self._monitoring_dir = monitoring_dir
    self._duration_sampler = _new_duration_sampler(monitoring_dir)
    self._lock = Lock()
    self._runtime_bucket = (
        _start_runtime_sampler(monitoring_dir) if with_runtime else None)
    if with_profiler:
        self._profiler = Profile()
        self._profiler.enable()
    else:
        self._profiler = None
def __init__(self, name, directory=None):
    """Resolve the profile's output path and start profiling right away."""
    self.path = get_path(PROFILE_DIR, name, directory=directory,
                         ending='.profile')
    self.base = ProfileBase()
    self.base.enable()
def inner(*args, **kwargs):
    """Run ``fn`` under cProfile and write its stats into the profiler log dir."""
    result = None
    try:
        prof = Profile()
        result = prof.runcall(fn, *args, **kwargs)
    finally:
        try:
            os.mkdir(PROFILER_LOG_DIR)
        except os.error:
            # Directory already exists (or cannot be created); any real
            # problem will surface in the open() below.
            pass
        stats_path = PROFILER_LOG_DIR + _get_filename_for_function(fn)
        with open(stats_path, 'w') as out:
            pstats.Stats(prof, stream=out).strip_dirs().sort_stats(
                *sort_args).print_stats(*print_args)
    return result
def profile(sort_args=('cumulative',), print_args=(10,)):
    '''
    Simple wrapper of cProfile python library to print in stdout useful
    infos about specific function runtime.

    Suggested usage: insert the following line before the definition of the
    function whose profiling is needed
        @profile(sort_args=['name'], print_args=[N])
    with N = # of tasks which are listed

    Note: the profiler is shared by every function decorated by the same
    ``profile(...)`` call, so stats accumulate across calls.
    '''
    # Local import keeps the decorator self-contained.
    from functools import wraps

    profiler = Profile()

    def decorator(fn):
        @wraps(fn)  # preserve the wrapped function's name/docstring
        def inner(*args, **kwargs):
            result = None
            try:
                result = profiler.runcall(fn, *args, **kwargs)
            finally:
                # Print stats even if fn raised.
                stats = pstats.Stats(profiler)
                stats.strip_dirs().sort_stats(*sort_args).print_stats(
                    *print_args)
            return result
        return inner
    return decorator
def profile(self, request):
    """Start/stop the python profiler, returns profile results"""
    # One profiler instance is cached on the instance dict so successive
    # start/stop management requests operate on the same profiler.
    profile = self.__dict__.get("_profile")
    if "start" in request.properties:
        if not profile:
            profile = self.__dict__["_profile"] = Profile()
        profile.enable()
        self._log(LOG_INFO, "Started python profiler")
        return (OK, None)
    # Any non-start request requires a previously started profiler.
    if not profile:
        raise BadRequestStatus("Profiler not started")
    if "stop" in request.properties:
        profile.create_stats()
        self._log(LOG_INFO, "Stopped python profiler")
        out = StringIO()
        stats = pstats.Stats(profile, stream=out)
        try:
            # The "stop" property value selects the output format.
            stop = request.properties["stop"]
            if stop == "kgrind":
                # Generate kcachegrind output using pyprof2calltree
                from pyprof2calltree import convert
                convert(stats, out)
            elif stop == "visualize":
                # Start kcachegrind using pyprof2calltree
                from pyprof2calltree import visualize
                visualize(stats)
            else:
                stats.print_stats()  # Plain python profile stats
            return (OK, out.getvalue())
        finally:
            out.close()
    raise BadRequestStatus("Bad profile request %s" % (request))
def setUpClass(cls):
    # Profile the entire test-class run; stats are presumably inspected in
    # tearDownClass -- TODO confirm.
    cls.profiler = Profile()
    cls.profiler.enable()
    # Check if the server is running
    # (equivalent of: ps aux | grep doController.py | grep -v grep)
    pipe1 = Popen(["ps", "aux"], stdout=PIPE)
    pipe2 = Popen(["grep", "doController.py"], stdin=pipe1.stdout, stdout=PIPE)
    pipe1.stdout.close()
    pipe1 = Popen(["grep", "-v", "grep"], stdin=pipe2.stdout, stdout=PIPE)
    pipe2.stdout.close()
    procs = pipe1.communicate()[0]
    # If the server is running, we keep it running
    if b"doController.py" in procs:
        urlopen('http://localhost:8000/restart')
        cls.is_running = True
    else:
        # If the server is not running, start it for the duration of the tests
        # NOTE(review): is_running is not set on this branch -- confirm a
        # class-level default exists elsewhere.
        Popen([
            'python3', '{0}src/controller.py'.format(
                __file__[:__file__.index('tests')])
        ])
    # Busy-wait (up to 100 probes) until the server accepts connections.
    i = 0
    while not get_connection() and i < 100:
        i += 1
    cls.cli = client.HTTPConnection('localhost:8000', timeout=100)
def profile_ctx(engine='pyinstrument'):
    """
    A context manager which profiles the body of the with statement
    with the supplied profiling engine and returns the profiling object
    in a list.

    Arguments
    ---------
    engine: str
      The profiling engine, e.g. 'pyinstrument' or 'snakeviz', or None
      to disable profiling.

    Returns
    -------
    sessions: list
      A list containing the profiling session.

    Raises
    ------
    ValueError
      If an unknown profiling engine is requested.
    """
    if engine == 'pyinstrument':
        from pyinstrument import Profiler
        prof = Profiler()
        prof.start()
    elif engine == 'snakeviz':
        prof = Profile()
        prof.enable()
    elif engine is None:
        pass
    else:
        # Fail fast instead of silently profiling nothing.
        raise ValueError("Unknown profiling engine %r" % engine)
    sessions = []
    yield sessions
    if engine == 'pyinstrument':
        sessions.append(prof.stop())
    elif engine == 'snakeviz':
        prof.disable()
        sessions.append(prof)
def find_slow(function):
    """Execute ``function`` under cProfile and print stats sorted by own time."""
    with Profile() as prof:
        function()
    pstats.Stats(prof).sort_stats(pstats.SortKey.TIME).print_stats()
def __call__(self, environ, start_response):
    # WSGI middleware entry point: requests from configured debug users are
    # run under cProfile and the stats are injected into the HTML response.
    # Identify the user: prefer REMOTE_USER, fall back to HTTP Basic auth.
    userid = environ.get('REMOTE_USER', None)
    if userid is None:
        author = environ.get('HTTP_AUTHORIZATION', None)
        if author is not None and author.strip().lower()[:5] == 'basic':
            userpw_base64 = author.strip()[5:].strip()
            # NOTE(review): str.decode('base64') is Python 2 only; this
            # middleware will not run unchanged on Python 3.
            userid, pw = userpw_base64.decode('base64').split(':', 1)
    # Non-debug users bypass profiling entirely.
    if userid not in self.debugusers:
        return self.app(environ, start_response)
    response_body = []
    response = {'status': None, 'headers': None, 'exc_info': None}

    def catching_start_response(status, headers, exc_info=None):
        # Capture the downstream response metadata instead of sending it now.
        response['status'] = status
        response['headers'] = headers
        response['exc_info'] = exc_info
        return response_body.append

    def runapp():
        # Drive the wrapped app and collect its body so it can be rewritten.
        appiter = self.app(environ, catching_start_response)
        response_body.extend(appiter)
        if hasattr(appiter, 'close'):
            appiter.close()

    p = Profile()
    p.runcall(runapp)
    body = ''.join(response_body)
    data = StringIO()
    stats = Stats(p, stream=data)
    stats.sort_stats('cumulative', 'calls')
    stats.print_stats(.1)  # only the top 10% of entries
    # Inject a toggleable profile overlay just before the cydra note div.
    body = body.replace(
        '<div id="cydra_note">',
        '<div id="profile_button"><button onclick="$(\'#profiledata\').toggle();">Profile</button></div><div id="profiledata" style="position: absolute; display: none; z-index: 1000; top: 25pt; left: 10pt; background-color: white; border: 1px solid black;"><pre>%s</pre></div>' % data.getvalue()
        + '<div id="cydra_note">')
    # The body changed, so Content-Length must be recomputed.
    response_headers = dict(response['headers'])
    response_headers['Content-Length'] = len(body)
    response_headers = response_headers.items()
    start_response(response['status'], response_headers, response['exc_info'])
    return [body]
class ProfileHelper(ProfilingHelper):
    """Profiling helper backed by cProfile (falling back to pure-python profile)."""

    def run(self):
        try:
            from cProfile import Profile
        except ImportError:
            from profile import Profile

        def run_callable():
            "A local function we can refer to in a string with profile.run"
            self.result = self.callable(*self.args, **self.kwargs)

        # runctx returns the profiler itself once the command has executed.
        self.p = Profile().runctx("run_callable()", globals(), locals())
        self.p.dump_stats(self.filename)

    def stats(self):
        import pstats
        return pstats.Stats(self.p)
def test_080_fnovalid(self):
    """Stats dumped from an empty profile should produce no printable rows."""
    stats_path = self.get_fname_stats('_novalid')
    Profile().dump_stats(stats_path)
    rows = get_pstats_print2list(
        stats_path, filter_fnames=[], exclude_fnames=None, limit=None)
    self.assertFalse(rows)
def wrapped(*args, **kwargs):
    """Call ``func``; when ``param`` is truthy, profile it and dump the stats."""
    if not param:
        return func(*args, **kwargs)
    prof = Profile()
    prof.enable()
    result = func(*args, **kwargs)
    prof.disable()
    prof.dump_stats(func.__name__ + ".cprofile")
    return result
def profiled_funk(*args, **kwargs):
    """Run ``funk`` under cProfile, always printing the collected stats.

    The profiler is disabled inside ``finally``: the original only disabled
    it on the success path, so an exception in ``funk`` left the profiler
    globally enabled and broke any later profiling in the process.
    """
    profile = Profile()
    try:
        profile.enable()
        ret_val = funk(*args, **kwargs)
    finally:
        profile.disable()
        print("__CPROFILE__")
        profile.print_stats()
    return ret_val
def apply_some_filters_profile():
    """Profile ``augmentation.apply_some_filters`` over a bulk of test frames."""
    frames = tests.input_bulk(200)
    prof = Profile()
    prof.enable()
    augmentation.apply_some_filters(frames, tests.config.framerate)
    prof.disable()
    prof.print_stats()
def run():
    """Profile BCS gap-energy evaluation across the temperature range."""
    gap_energy = BCSGapEnergy(1.5e-3, 4000)
    temperatures = linspace(0, gap_energy.critical_temperature(), 500)
    with Profile() as prof:
        for temperature in temperatures:
            gap_energy.evaluate(temperature)
    prof.print_stats()
def setup(self, bottom, top):
    """
    Setup data layer according to parameters:

    - data_dir: path to training images dir
    - split: train / test: data set to load image names from
    - tops: list of tops to output from {image, label}
    - seed: seed for randomization (default: None / current time)
    - samples_per_class: the number of samples to take during each iteration
    - channels_of_interest: tuple denoting the channels of interest to
      extract from the source image. (default: (1,2))
    - kernel: size of kernel to load images of
    - stats_path: path to the vsi_stats.json file that contains
      normalization stats
    - normalization: {static, median, mean} select normalization mode,
      defaults to static

    example: params = dict(data_dir="/path/to/fish_training_data",
                           split="train", tops=['image', 'label'])
    """
    # NOTE(review): eval of the layer parameter string is the caffe
    # python-layer convention, but it executes arbitrary code -- the
    # param_str must be trusted.
    params = eval(self.param_str)
    self.data_dir = path.expanduser(params['data_dir'])
    self.split = params['split']
    self.tops = params['tops']
    self.kernel = params['kernel']
    self.seed = params.get('seed', None)
    self.samples_per_class = params.get('samples_per_class', 1)
    self.channels_of_interest = params.get('channels_of_interest', (1,2))
    self.stats_path = params.get('stats_path', None)
    self.normalization = params.get('normalization', 'static').lower()
    self.profiler = Profile()
    # store top data for reshape + forward
    self.data = {}
    # tops: check configuration
    if len(top) != len(self.tops):
        raise Exception("Need to define {} tops for all outputs.".format(len(self.tops)))
    # data layers have no bottoms
    if len(bottom) != 0:
        raise Exception("Do not define a bottom.")
    # Normalization stats are only required for non-static modes.
    if self.stats_path is not None:
        self.stats_dict = json.load(open(self.stats_path))
    elif self.normalization != 'static':
        raise Exception("stats_path must be specified for normalization modes other than static")
    # Load the split's index and pre-load every (source image, mask) pair.
    # NOTE(review): dict.itervalues() is Python 2 only.
    index_path = path.join(self.data_dir, '{}_index.json'.format(self.split))
    index = json.load(open(index_path))
    self.source_image_mask_pairs = \
        [(self.load_source_image(entry), self.load_mask(entry))
         for entry in index.itervalues()]
    # load image indices for images and labels
    self.initialize_data_map()
    #if self.samples_per_class > len(self.unique_labels):
        #raise Exception("samples_per_class cannot exceed the number of instances of each class.")
    if self.samples_per_class < 1:
        raise Exception("samples_per_class cannot be less than 1.")
def __init_profiler(
        profiler_arg: bool,
        logger: logging.Logger) -> Tuple[Optional[Profile], Optional[str]]:
    """
    Initialise profiling environment by creating a cprofile.Profiler
    instance and a folder where pstats can be dumped.

    Args:
        profiler_arg: the value of profiler argument passed when running
            the command
        logger: a logger instance

    Returns:
        If profiling enabled, a tuple of profiler instance and profiling
        directory where the stats files would be dumped, otherwise, a
        tuple of nulls
    """
    # Guard clause: profiling disabled.
    if not profiler_arg:
        logger.info('Profiling mode not enabled')
        return None, None

    logger.info('Profiling mode enabled')
    logger.debug('Creating & enabling profiler ...')
    profiler = Profile()
    profiler.enable()
    logger.debug('Profiler created.')

    # A unique, timestamped directory per run avoids clobbering old stats.
    profiling_dir = os.path.join(
        PROFILING_DIR,
        f'{datetime.utcnow().strftime("%Y%m%d_%H%M%S_%f")}_{generate_random_string(10)}',
    )
    try:
        os.makedirs(profiling_dir)
        logger.debug('Profiling directory "%s" created', profiling_dir)
    except OSError as ex:
        if ex.errno != errno.EEXIST:
            raise
        logger.debug('Profiling directory "%s" already exists', profiling_dir)
    return profiler, profiling_dir
def profiled_funk(*args, **kwargs):
    """wrapper funk

    Runs ``funk`` under cProfile and always prints cumulative stats. The
    profiler is disabled inside ``finally``: the original only disabled it
    on the success path, so an exception in ``funk`` left the profiler
    globally enabled and broke any later profiling in the process.
    """
    profile = Profile()
    try:
        profile.enable()
        ret_val = funk(*args, **kwargs)
    finally:
        profile.disable()
        print("__CPROFILE__")
        profile.print_stats("cumulative")
    return ret_val
def test_profile(self):
    """Smoke-test profiling of the mock-db workload."""
    from cProfile import Profile
    from pstats import Stats

    # LOGIC
    prof = Profile()
    prof.runcall(_for_profile_function, 100_000, _create_mock_db)
    Stats(prof).print_stats()
def start(self, name):
    """
    Start a particular profile, creating it on first use.

    Does nothing when another profile is already active.
    """
    if self.active_profile is not None:
        return
    if name not in self.profilers:
        self.profilers[name] = Profile()
    self.active_profile = self.profilers[name]
    self.active_profile_name = name
    # Drop stats from any previous run of this profile, then begin sampling.
    self.active_profile.clear()
    self.active_profile.enable()
def main():
    """CLI entry point: set up logging, parse args, optionally profile the run.

    On failure logs the exception and, in debug mode, drops into pdb.
    Always tears down logging and terminates orphan worker processes.
    """
    from ..logging.base import setup_context as setup_logging_context
    from ..logging.base import teardown as teardown_logging

    opts = None
    pr = None
    debug = False
    profile = False
    try:
        setup_logging_context()

        from ..utils.pickle import patch_nipype_unpickler
        patch_nipype_unpickler()

        from .parser import parse_args
        opts, should_run = parse_args()
        debug = opts.debug
        profile = opts.profile

        if profile is True:
            from cProfile import Profile
            pr = Profile()
            pr.enable()

        run(opts, should_run)
    except Exception as e:
        logger.exception("Exception: %s", e, exc_info=True)
        if debug:
            import pdb
            pdb.post_mortem()
    finally:
        if profile and pr is not None:
            pr.disable()
            if opts is not None:
                pr.dump_stats(
                    Path(opts.workdir)
                    / f"profile.{format_current_time():s}.prof")
        teardown_logging()
        # clean up orphan processes
        from ..utils.multiprocessing import terminate
        terminate()
def _profile(prof: cProfile.Profile = None, *, outfile: T.TextIO = sys.stdout) -> cProfile.Profile: """ Implement context manager protocol for cProfile.Profile. If using python version 3.8 or higher, use cProfile.Profile to context manager directly """ # noqa: E501 if prof is None: prof = cProfile.Profile() try: prof.enable() yield prof finally: prof.disable() sort_by = pstats.SortKey.CUMULATIVE ps = pstats.Stats(prof, stream=outfile).sort_stats(sort_by) ps.print_stats()
def decorator(*args, **kwargs):
    """Run ``func`` under a profiler and export its stats.

    When ``cumulative`` is set, the module-level ``profiler`` is reused so
    stats accumulate across calls; otherwise each call is profiled in
    isolation. Always writes callgrind output; optionally dumps/prints stats.
    """
    result = None
    # The original used a function-wide `global profiler` statement inside
    # the if-branch; since `global` applies to the whole function scope, the
    # non-cumulative branch silently clobbered the global profiler. Using a
    # plain local avoids that.
    if cumulative:
        prof = profiler  # module-level accumulator
    else:
        prof = Profile()
    try:
        result = prof.runcall(func, *args, **kwargs)
    finally:
        # Export stats even if func raised.
        if dump_stats:
            prof.dump_stats(profile_filename)
        stats = pstats.Stats(prof)
        conv = pyprof2calltree.CalltreeConverter(stats)
        with open(callgrind_filename, 'w') as fd:
            conv.output(fd)
        if print_stats:
            stats.strip_dirs().sort_stats(sort_stats).print_stats(
                print_stats)
    return result
def request_profiling_handler(request):
    """Profile each ``handler`` call; print cumulative stats every
    ``num_profile`` requests, then restart with a fresh profiler."""
    profile[0].enable()
    try:
        return handler(request)
    finally:
        profile[0].disable()
        req[0] += 1
        if req[0] >= num_profile:
            # Emit accumulated stats and reset both counter and profiler.
            profile[0].print_stats("cumulative")
            req[0] = 0
            profile[:] = [Profile()]
def update_data_profiling_wrapper(create_full, create_partial,
                                  disable_parallel_computing=False):
    """Run ``update_data``, optionally under cProfile with kcachegrind export.

    When ``Profiling.ACTIVATED`` the run executes under cProfile and the
    collected stats are converted to a callgrind file for kcachegrind.
    """
    if not Profiling.ACTIVATED:
        logger.info("STARTED run without profiling")
        update_data(create_full, create_partial, disable_parallel_computing)
        logger.info("COMPLETED run without profiling")
        return

    logger.info("STARTED run with profiling")
    profiler = Profile()
    # runctx's signature is (cmd, globals, locals); the original passed
    # locals() and globals() swapped, which only worked by accident of
    # exec's locals-then-globals name lookup order.
    profiler.runctx(
        "update_data(create_full, create_partial, disable_parallel_computing)",
        globals(), locals())
    relative_filename = 'profiling_results/create_index_run.kgrind'
    filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            relative_filename)
    convert(profiler.getstats(), filename)
    logger.info(
        "COMPLETED run with profiling: exported profiling result as %s",
        filename)
def run():
    """Profile Mattis-Bardeen conductivity evaluation over a frequency sweep."""
    gap_energy = BCSGapEnergy(1.5e-3, 4000)
    conductivity = MattisBardeenSuperconductorConductivity(gap_energy, 2.4e7)
    frequencies = linspace(10e9, 1500e9, 400)
    with Profile() as prof:
        for frequency in frequencies:
            conductivity.evaluate(4.2, frequency)
    prof.print_stats()
def wrapped(*args, **kwargs):
    """
    Prints the profile stats and the memory usage to the decorated function
    """
    # Peak RSS before and after the call brackets the function's memory cost.
    mem_before = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    prof = Profile()
    result = prof.runcall(func, *args, **kwargs)
    mem_after = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    _print_stats(prof, sort_type)
    print('Ran profile_with_memory_usage to %s' % func.__name__)
    print('Initial RAM: %s ' % _format_size(mem_before))
    print('Final RAM: %s' % _format_size(mem_after))
    return result