def process_response(self, request, response):
    """Append hotshot profiler output to the response when profiling is
    enabled for this request (see ``can_profile``).

    Best-effort: any failure while building the report leaves the
    original response untouched.
    """
    try:
        if can_profile(request):
            self.prof.close()
            # stats.print_stats() writes to stdout; capture it.
            out = StringIO.StringIO()
            old_stdout = sys.stdout
            sys.stdout = out
            stats = hotshot.stats.load(self.tmpfile)
            stats.sort_stats('time', 'calls')
            stats.print_stats()
            sys.stdout = old_stdout
            stats_str = out.getvalue()
            if response and response.content and stats_str:
                response.content = "<pre>" + stats_str + "</pre>"
                # Keep only the first 40 lines of the report.
                response.content = "\n".join(response.content.split("\n")[:40])
                response.content += self.summary_for_files(stats_str)
            os.unlink(self.tmpfile)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt. Still best-effort — a profiling
        # failure must never break the response.
        pass
    return response
def process_response(self, request, response):
    """Replace the response body with hotshot profiler stats.

    Runs only when DEBUG is on (or the user is a superuser) and
    ``prof`` appears in the query string.
    """
    if (settings.DEBUG or request.user.is_superuser) and 'prof' in request.GET:
        self.prof.close()
        # print_stats() writes to stdout; capture it in a StringIO.
        out = StringIO.StringIO()
        old_stdout = sys.stdout
        sys.stdout = out
        stats = hotshot.stats.load(self.tmpfile)
        stats.sort_stats('time', 'calls')
        stats.print_stats()
        sys.stdout = old_stdout
        stats_str = out.getvalue()
        if response and response.content and stats_str:
            response.content = "<pre>" + stats_str + "</pre>"
            # Truncate to the first 40 lines of the report.
            response.content = "\n".join(response.content.split("\n")[:40])
            response.content += self.summary_for_files(stats_str)
        os.unlink(self.tmpfile)
    return response
def atexit(self):
    """Stop profiling and print profile information to sys.stderr.

    This function is registered as an atexit hook.
    """
    self.profiler.close()
    funcname = self.fn.__name__
    filename = self.fn.__code__.co_filename
    lineno = self.fn.__code__.co_firstlineno
    print("")
    print("*** PROFILER RESULTS ***")
    print("%s (%s:%s)" % (funcname, filename, lineno))
    if self.skipped:
        skipped = "(%d calls not profiled)" % self.skipped
    else:
        skipped = ""
    print("function called %d times%s" % (self.ncalls, skipped))
    print("")
    stats = hotshot.stats.load(self.logfilename)
    # hotshot.stats.load takes ages, and the .prof file eats megabytes, but
    # a saved stats object is small and fast
    if self.filename:
        # it is best to save before strip_dirs
        stats.dump_stats(self.filename)
    stats.strip_dirs()
    stats.sort_stats('cumulative', 'time', 'calls')
    stats.print_stats(40)
def process_response(self, request, response):
    """Replace the response with profiler output as a text/html page.

    Active when DEBUG is on or (if the request carries a user) the user
    is a superuser, and SHOW_PROFILE_MAGIC_KEY is in the query string.
    """
    if (settings.DEBUG or (hasattr(request, 'user') and request.user.is_superuser)) and SHOW_PROFILE_MAGIC_KEY in request.GET:
        self.prof.close()
        # Capture print_stats() output from stdout.
        out = StringIO()
        old_stdout = sys.stdout
        sys.stdout = out
        stats = hotshot.stats.load(self.tmpfile)
        stats.sort_stats('time', 'calls')
        stats.print_stats()
        sys.stdout = old_stdout
        stats_str = out.getvalue()
        if response and response.content and stats_str:
            response.content = "<html><pre>" + stats_str + "</pre></html>"
            # Keep only the first 40 lines of the report.
            response.content = "\n".join(response.content.split("\n")[:40])
            response.content += self.summary_for_files(stats_str)
        os.unlink(self.tmpfile)
        response['Content-Type'] = 'text/html'
    return response
def __call__(self, *args):  ##, **kw): kw unused
    """Profile a single call of ``self.func`` with hotshot.

    Returns a ``(result, report)`` tuple where *report* is the textual
    stats output (top 40 by internal time, plus callers) followed by
    the wall-clock time of the call.
    """
    import hotshot, hotshot.stats, os, tempfile  ##, time already imported
    f, filename = tempfile.mkstemp()
    os.close(f)
    prof = hotshot.Profile(filename)
    stime = time.time()
    result = prof.runcall(self.func, *args)
    stime = time.time() - stime
    prof.close()
    import cStringIO
    out = cStringIO.StringIO()
    stats = hotshot.stats.load(filename)
    stats.stream = out
    stats.strip_dirs()
    stats.sort_stats('time', 'calls')
    stats.print_stats(40)
    stats.print_callers()
    x = '\n\ntook ' + str(stime) + ' seconds\n'
    x += out.getvalue()
    # remove the tempfile
    try:
        os.remove(filename)
    except OSError:
        # BUG FIX: os.remove raises OSError, not IOError; the old
        # `except IOError` let removal failures propagate on Python 2.
        pass
    return result, x
def process_response(self, request, response):
    """Render pstats output into the response body when profiling is on.

    Query parameters:
      prof_strip -- strip leading path info from file names
      prof_sort  -- comma-separated sort fields (default: time,calls)
    """
    if self.show_profile(request):
        stats = self.stats()
        if 'prof_strip' in request.GET:
            stats.strip_dirs()
        if 'prof_sort' in request.GET:
            # See
            # http://docs.python.org/2/library/profile.html#pstats.Stats.sort_stats  # noqa
            # for the fields you can sort on.
            stats.sort_stats(*request.GET['prof_sort'].split(','))
        else:
            stats.sort_stats('time', 'calls')
        # Capture STDOUT temporarily
        old_stdout = sys.stdout
        out = StringIO()
        sys.stdout = out
        stats.print_stats()
        stats_str = out.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
        # Print status within PRE block
        if response and response.content and stats_str:
            response.content = "<pre>" + stats_str + "</pre>"
    return response
def prof_main(argv):
    """Command-line driver: profile ``module.function`` with hotshot.

    With extra args the function is run under the profiler and data is
    written to ``<name>.prof``; with no extra args a previously written
    profile of that name is loaded and printed instead.
    """
    import getopt
    import hotshot, hotshot.stats
    def usage():
        print 'usage: %s module.function [args ...]' % argv[0]
        return 100
    args = argv[1:]
    if len(args) < 1:
        return usage()
    name = args.pop(0)
    prof = name+'.prof'
    i = name.rindex('.')
    (modname, funcname) = (name[:i], name[i+1:])
    module = __import__(modname, fromlist=1)
    func = getattr(module, funcname)
    if args:
        # Re-insert the program name so the callee sees a full argv.
        args.insert(0, argv[0])
        prof = hotshot.Profile(prof)
        prof.runcall(lambda : func(args))
        prof.close()
    else:
        stats = hotshot.stats.load(prof)
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(1000)
    return
def wrapped(*args, **kwds): """ Inner method for calling the profiler method. """ # define the profile name filename = os.path.join(path, '%s.prof' % func.__name__) # create a profiler for the method to run through prof = hotshot.Profile(filename) results = prof.runcall(func, *args, **kwds) prof.close() # log the information about it stats = hotshot.stats.load(filename) if stripDirs: stats.strip_dirs() # we don't want to know about the arguments for this method stats.sort_stats(*sorting) stats.print_stats(limit) # remove the file if desired if autoclean: os.remove(filename) return results
def process_response(self, request, response):
    """Replace the response body with profiler stats plus a rendered
    debug-context template.

    Active when DEBUG is on (or the user is a superuser) and ``prof``
    is present in the request parameters.
    """
    if (settings.DEBUG or request.user.is_superuser) and 'prof' in request.REQUEST:
        self.prof.close()
        out = StringIO.StringIO()
        old_stdout = sys.stdout
        sys.stdout = out
        stats = hotshot.stats.load(self.tmpfile)
        stats.sort_stats('time', 'calls')
        stats.print_stats()
        sys.stdout = old_stdout
        stats_str = out.getvalue()
        if response and response.content and stats_str:
            # FIX: get_debug_context() was also called unconditionally
            # before this branch, leaving a dead store when the branch
            # is skipped; compute it once, only where it is used.
            info = self.get_debug_context(request)
            response.content = "<pre>" + stats_str + "</pre>" + template.Template(DEBUG_TEMPLATE).render(template.Context(info))
            response.content = "\n".join(response.content.split("\n")[:40])
            response.content += self.summary_for_files(stats_str)
            # NOTE(review): the debug template is rendered twice (once
            # above, once here); the first copy is mostly removed by the
            # 40-line truncation. Behavior preserved as-is.
            response.content += template.Template(DEBUG_TEMPLATE).render(template.Context(info))
        os.unlink(self.tmpfile)
    return response
def atexit(self):
    """Stop profiling and print profile information to sys.stderr. This
    function is registered as an atexit hook. """
    self.profiler.close()
    funcname = self.fn.__name__
    filename = self.fn.func_code.co_filename
    lineno = self.fn.func_code.co_firstlineno
    print
    print "*** PROFILER RESULTS ***"
    print "%s (%s:%s)" % (funcname, filename, lineno)
    # Trailing comma: the "(N calls not profiled)" note, when present,
    # is printed on the same line.
    print "function called %d times" % self.ncalls,
    if self.skipped:
        print "(%d calls not profiled)" % self.skipped
    else:
        print
    print
    stats = hotshot.stats.load(self.logfilename)
    if self.filename:
        # Save before strip_dirs so the dumped stats keep full paths.
        stats.dump_stats(self.filename)
    stats.strip_dirs()
    stats.sort_stats('cumulative', 'time', 'calls')
    stats.print_stats(40)
def show_profile(stats):
    """Pretty-print *stats* (callers, callees or plain stats depending
    on ``options``) and page the reformatted result through
    ``output_with_pager``."""
    # stats.strip_dirs()
    stats.sort_stats(options['order'])
    # now capture the output
    out = cStringIO.StringIO()
    old_stdout = sys.stdout
    sys.stdout = out
    # Figure out the correct part of stats to call
    try:
        if options['output'] == 'callers':
            print " Callers of '" + options['data'] + "':"
            stats.print_callers(options['data'], options['limit'])
        elif options['output'] == 'callees':
            print " Functions that '" + options['data'] + "' call:"
            stats.print_callees(options['data'], options['limit'])
        else:
            # show stats
            print "Statistics: "
            stats.print_stats(options['limit'])
    except:
        # Best-effort: a bad caller/callee pattern raises inside pstats.
        print "Couldn't generate output. Possibly bad caller/callee pattern"
    # reset to defaults
    sys.stdout = old_stdout
    out.seek(0)
    parse_state = None;
    # keep track of where the 2nd column of functions start
    # we'll find this out from the header
    col2starts = 0
    result = "";
    for line in out:
        # funclist1: the first line of the function list
        if parse_state == 'funclist':
            function = line[0:col2starts].strip()
            subfunc = line[col2starts:].strip()
            if function:
                result += "\n" + function + "\n"
            result += " " + subfunc + "\n"
        # default parse_state, look for Function header
        elif line.startswith('Function'):
            # The header text differs per report type; its column
            # position tells us where the 2nd column begins.
            if options['output'] == 'callers':
                col2starts = line.find('was called by')
            elif options['output'] == 'callees':
                col2starts = line.find('called')
            parse_state = 'funclist'
        else:
            result += line + "\n"
    # now spit out to less
    output_with_pager(result)
def process_response(self, request, response):
    """Prepend heapy RAM usage and hotshot CPU stats to the response and
    persist both reports in a ProfilerData row.

    Triggered by a ``prof`` query-string parameter.
    """
    if request.GET.has_key('prof'):
        h = hpy()
        mem_profile = h.heap()
        pd = ProfilerData(
            view = request.path,
        )
        self.prof.close()
        # Capture print_stats() output from stdout.
        out = StringIO()
        old_stdout = sys.stdout
        sys.stdout = out
        stats = hotshot.stats.load(self.tmpfile.name)
        #stats.strip_dirs()
        stats.sort_stats('cumulative')
        stats.print_stats()
        sys.stdout = old_stdout
        stats_str = out.getvalue()
        if response and response.content and stats_str:
            response.content = "<h1>Instance wide RAM usage</h1><pre>%s</pre><br/><br/><br/><h1>CPU Time for this request</h1><pre>%s</pre>" % (mem_profile, stats_str)
            pd.profile = "Instance wide RAM usage\n\n%s\n\n\nCPU Time for this request\n\n%s" % (mem_profile, stats_str)
            pd.save()
    return response
def run():
    """Run the PyMVPA test suite, optionally under the hotshot profiler.

    Environment:
      PROFILELEVEL -- number of stat lines to print; when unset the
                      suite runs without profiling.
      PROFILELINES -- when set, enable per-line profiling events.
    """
    profilelevel = None
    if 'PROFILELEVEL' in environ:
        profilelevel = int(environ['PROFILELEVEL'])
    if profilelevel is None:
        TestProgramPyMVPA()
    else:
        profilelines = 'PROFILELINES' in environ
        import hotshot, hotshot.stats
        pname = "%s.prof" % sys.argv[0]
        prof = hotshot.Profile(pname, lineevents=profilelines)
        try:
            # actually return values are never setup
            # since unittest.main sys.exit's
            benchtime, stones = prof.runcall( unittest.main )
        except SystemExit:
            pass
        print "Saving profile data into %s" % pname
        prof.close()
        if profilelevel > 0:
            # we wanted to see the summary right here
            # instead of just storing it into a file
            print "Loading profile data from %s" % pname
            stats = hotshot.stats.load(pname)
            stats.strip_dirs()
            stats.sort_stats('time', 'calls')
            stats.print_stats(profilelevel)
def __call__(self, *args):  ##, **kw): kw unused
    """Profile a call of ``self.func`` and stream the profiler report
    after the wrapped result.

    Returns an iterator chaining the original result (wrapped in a list
    when it is not already iterable) with the timing line and the
    captured stats text.
    """
    import hotshot, hotshot.stats, os, tempfile  ##, time already imported
    f, filename = tempfile.mkstemp()
    os.close(f)
    prof = hotshot.Profile(filename)
    stime = time.time()
    result = prof.runcall(self.func, *args)
    stime = time.time() - stime
    prof.close()
    import cStringIO
    out = cStringIO.StringIO()
    stats = hotshot.stats.load(filename)
    stats.stream = out
    stats.strip_dirs()
    stats.sort_stats('time', 'calls')
    stats.print_stats(40)
    stats.print_callers()
    def xx():
        # Trailing chunks: wall-clock line, then the captured report.
        yield '\n\ntook ' + str(stime) + ' seconds\n'
        yield out.getvalue()
    # remove the tempfile
    try:
        os.remove(filename)
    except OSError:
        # BUG FIX: os.remove raises OSError, not IOError; the old
        # `except IOError` let removal failures propagate on Python 2.
        pass
    if result and not(hasattr(result, 'next') or hasattr(result, '__iter__')):
        result = [result]
    return itertools.chain(result, xx())
def interactive_mode(index, prof=0):
    """Interactive query loop: read a line, run it against *index* and
    print scored matches; with *prof* truthy, profile each query.

    NOTE(review): *prof* is rebound to a hotshot.Profile inside the
    loop, so it stays truthy on later iterations — presumably intended.
    """
    while 1:
        line = raw_input("> ")
        try:
            if prof:
                prof = hotshot.Profile('tx.prof')
                resultset = prof.runctx('index._apply_index( {\'text\':{\'query\':line}})[0] ', globals(), locals())
                prof.close()
                stats = hotshot.stats.load('tx.prof')
                stats.strip_dirs()
                stats.sort_stats('cumulative')
                stats.print_stats(100)
            else:
                resultset = index._apply_index( {'text':{'query':line}})[0]
            print "Result: %d matches" % len(resultset)
            # Sort docids by descending score.
            lst = list(resultset.items())
            lst.sort(lambda x,y : -cmp(x[1],y[1]))
            for docid,score in lst:
                print "%-2d %s %d" % (docid, docs[docid].getId(), score)
        except:
            traceback.print_exc()
def __call__(self, request):
    """Middleware entry point: when ``prof`` is in the query string,
    run the request under hotshot and swap the body for the stats."""
    profiling = 'prof' in request.GET
    if profiling:
        self.tmpfile = tempfile.NamedTemporaryFile()
        self.prof = hotshot.Profile(self.tmpfile.name)
    response = self.get_response(request)
    if profiling:
        self.prof.close()
        # print_stats() writes to stdout, so capture it temporarily.
        capture = StringIO()
        saved_stdout = sys.stdout
        sys.stdout = capture
        stats = hotshot.stats.load(self.tmpfile.name)
        # stats.strip_dirs()
        stats.sort_stats('cumulative', )
        # stats.sort_stats('time', )
        stats.print_stats()
        sys.stdout = saved_stdout
        report = capture.getvalue()
        if response and response.content and report:
            response.content = "<pre>" + report + "</pre>"
    return response
def process_response(self, request, response):
    """Replace the response with the raw profiler report as text/plain.

    Active when DEBUG is on (or the user is a superuser) and ``prof``
    is in the query string.
    """
    if (settings.DEBUG or request.user.is_superuser) and request.GET.has_key('prof'):
        self.prof.close()
        out = StringIO.StringIO()
        old_stdout = sys.stdout
        sys.stdout = out
        stats = hotshot.stats.load(self.tmpfile)
        stats.sort_stats('time', 'calls')
        stats.print_stats()
        sys.stdout = old_stdout
        stats_str = out.getvalue()
        # The guard below was deliberately disabled: the body is always
        # replaced, even for empty responses.
        #if response and response.content and stats_str:
        response.content = stats_str
        response['Content-Type'] = 'text/plain'
        # response.content = "\n".join(response.content.split("\n")[:40])
        response.content += self.summary_for_files(stats_str)
        os.unlink(self.tmpfile)
    return response
def process_response(self, request, response):
    """Swap the response body for profiler stats plus the SQL queries
    executed during the request (from django's connection.queries)."""
    if (settings.DEBUG or request.user.is_superuser) and "prof" in request.GET:
        self.prof.close()
        out = StringIO.StringIO()
        old_stdout = sys.stdout
        sys.stdout = out
        stats = hotshot.stats.load(self.tmpfile)
        stats.sort_stats("time", "calls")
        stats.print_stats()
        sys.stdout = old_stdout
        stats_str = out.getvalue()
        if response and response.content and stats_str:
            response.content = "<pre>" + stats_str + "</pre>"
            # Truncate to the first 40 lines of the report.
            response.content = "\n".join(response.content.split("\n")[:40])
            response.content += self.summary_for_files(stats_str)
        os.unlink(self.tmpfile)
        response.content += "\n%d SQL Queries:\n" % len(connection.queries)
        response.content += pprint.pformat(connection.queries)
    return response
def process_response(self, request, response):
    """Attach hotshot stats to the response when profiling is enabled
    for this request (see ``show_profiling``)."""
    if self.show_profiling(request):
        import hotshot.stats
        self.prof.close()
        out = StringIO.StringIO()
        old_stdout = sys.stdout
        sys.stdout = out
        stats = hotshot.stats.load(self.tmpfile)
        stats.sort_stats("time", "calls")
        stats.print_stats()
        sys.stdout = old_stdout
        stats_str = out.getvalue()
        if response and response.content and stats_str:
            response.content = "<pre>" + stats_str + "</pre>"
            # Truncate to the first 40 lines of the report.
            response.content = "\n".join(response.content.split("\n")[:40])
            response.content += self.summary_for_files(stats_str)
        os.unlink(self.tmpfile)
    return response
def profiled(*args, **kw):
    """Run *fn*, profiling it to *filename* when the current target is
    enabled in ``profile_config`` (or forced via the 'always' option);
    optionally print a stats report in verbose mode."""
    if (target not in profile_config['targets'] and
            not target_opts.get('always', None)):
        # Target not selected for profiling: call through directly.
        return fn(*args, **kw)
    elapsed, load_stats, result = _profile(
        filename, fn, *args, **kw)
    if not testlib.config.options.quiet:
        print "Profiled target '%s', wall time: %.2f seconds" % (
            target, elapsed)
    report = target_opts.get('report', profile_config['report'])
    if report and testlib.config.options.verbose:
        sort_ = target_opts.get('sort', profile_config['sort'])
        limit = target_opts.get('limit', profile_config['limit'])
        print "Profile report for target '%s' (%s)" % (
            target, filename)
        stats = load_stats()
        stats.sort_stats(*sort_)
        if limit:
            stats.print_stats(limit)
        else:
            stats.print_stats()
    os.unlink(filename)
    return result
def dumpProfileStats():
    """Load hotshot data from the global ``profData`` file and print
    cumulative-sorted stats to stdout."""
    import hotshot.stats
    global profData
    print "dump profiling data"
    stats = hotshot.stats.load(profData)
    stats.sort_stats("cumulative")
    stats.print_stats()
def _profile(continuation):
    """Profile *continuation* with cProfile when available, falling back
    to hotshot, and print reports sorted several different ways."""
    prof_file = 'populateDir.prof'
    try:
        import cProfile
        import pstats
        print('Profiling using cProfile')
        cProfile.runctx('continuation()', globals(), locals(), prof_file)
        stats = pstats.Stats(prof_file)
    except ImportError:
        import hotshot
        import hotshot.stats
        prof = hotshot.Profile(prof_file, lineevents=1)
        print('Profiling using hotshot')
        prof.runcall(continuation)
        prof.close()
        stats = hotshot.stats.load(prof_file)
    stats.strip_dirs()
    #for a in ['calls', 'cumtime', 'cumulative', 'ncalls', 'time', 'tottime']:
    for a in ['cumtime', 'time', 'ncalls']:
        print("------------------------------------------------------------------------------------------------------------------------------")
        try:
            stats.sort_stats(a)
            stats.print_stats(150)
            stats.print_callees(150)
            stats.print_callers(150)
        except KeyError:
            # Not every sort key exists in both backends; skip those.
            pass
    os.remove(prof_file)
def run_profile(self, slosl_statements, db_attributes, init_code):
    """Build views under the hotshot profiler and store two formatted
    reports (time-sorted and cumulative-sorted) in ``self.profile_data``.

    Returns the views produced by ``self.build_views``.
    """
    import hotshot, hotshot.stats, os, sys
    stderr = sys.stderr
    # discard runtime warning for tempnam
    sys.stderr = StringIO()
    # NOTE(review): os.tempnam is deprecated and race-prone;
    # tempfile.mkstemp would be the safe replacement.
    prof_filename = os.tempnam(None, 'slow-')
    sys.stderr = stderr
    prof = hotshot.Profile(prof_filename)
    views = prof.runcall(self.build_views, slosl_statements, db_attributes, init_code)
    prof.close()
    stats = hotshot.stats.load(prof_filename)
    stats.strip_dirs()
    stdout = sys.stdout
    # First report: sorted by internal time.
    stats.sort_stats('time', 'calls')
    sys.stdout = StringIO()
    stats.print_stats()
    profile_data = sys.stdout.getvalue()
    # Second report: sorted by cumulative time.
    stats.sort_stats('cumulative', 'calls')
    sys.stdout = StringIO()
    stats.print_stats()
    self.profile_data = (profile_data, sys.stdout.getvalue())
    sys.stdout = stdout
    os.remove(prof_filename)
    return views
def process_response(self, request, response):
    """Inject a rendered profiler report into HTML responses (just
    before </body>) when the PROFILER setting is on."""
    content_type = response._headers.get('content-type', ('',''))[1]
    if settings.PROFILER and 'text/html' in content_type:
        #response.content = response.content.decode('utf-8')
        self.prof.close()
        out = StringIO.StringIO()
        old_stdout = sys.stdout
        sys.stdout = out
        stats = hotshot.stats.load(self.tmpfile)
        stats.sort_stats('time', 'calls')
        stats.print_stats()
        sys.stdout = old_stdout
        stats_str = out.getvalue()
        report = ''
        if stats_str:
            report += stats_str
            #report += u"\n".join(response.content.split(u"\n")[:40])
            report += self.summary_for_files(stats_str)
        os.unlink(self.tmpfile)
        context={'report':report}
        html = render_to_string('djangoprofiler/report.html',context)
        # Splice the report in before </body> when possible, otherwise
        # append it to the end of the document.
        if '</body>' in response.content:
            response.content = response.content.replace('</body>', '%s</body>'%(html.encode('utf-8')))
        else:
            response.content+=html.encode('utf-8')
    return response
def __call__(self, environ, start_response):
    """WSGI wrapper: profile the wrapped app for this request, then
    write an HTML-converted stats report next to the .prof file.

    Returns the wrapped application's response unchanged.
    """
    # Derive the profile name from the request path (slashes -> dots).
    profname = "%s.prof" % (environ['PATH_INFO'].strip("/").replace('/', '.'))
    profname = os.path.join(self.path, profname)
    prof = hotshot.Profile(profname)
    # prof.start()
    ret = prof.runcall(self.app, environ, start_response)
    prof.close()
    out = StringIO()
    old_stdout = sys.stdout
    sys.stdout = out
    stats = hotshot.stats.load(profname)
    #stats.strip_dirs()
    stats.sort_stats('time', 'calls')
    stats.print_stats()
    sys.stdout = old_stdout
    stats_str = out.getvalue()
    from uliweb.utils.textconvert import text2html
    text = text2html(stats_str)
    outputfile = profname + '.html'
    # BUG FIX: was `file(outputfile, 'wb').write(text)`, which leaked
    # the file handle; the context manager closes it deterministically.
    with open(outputfile, 'wb') as report_file:
        report_file.write(text)
    return ret
def process_response(self, request, response):
    """Replace the body with profiler stats plus the SQL query log,
    served as HTML."""
    from django.db import connection
    # NOTE(review): request.has_key('prof') relies on HttpRequest
    # exposing dict-style lookup; sibling variants use request.GET —
    # confirm this works on the Django version in use.
    if (settings.DEBUG or request.user.is_superuser) and request.has_key('prof'):
        self.prof.close()
        out = StringIO.StringIO()
        old_stdout = sys.stdout
        sys.stdout = out
        stats = hotshot.stats.load(self.tmpfile)
        stats.sort_stats('time', 'calls')
        stats.print_stats()
        sys.stdout = old_stdout
        stats_str = out.getvalue()
        if response and response.content and stats_str:
            response.content = "<pre>" + stats_str + "</pre>"
            # Truncate to the first 40 lines of the report.
            response.content = "\n".join(response.content.split("\n")[:40])
            response.content += self.summary_for_files(stats_str)
            response.content += '\n%d SQL Queries:\n' % len(connection.queries)
            response.content += pprint.pformat(connection.queries)
            response['content-type'] = 'text/html; charset=utf-8'
        os.unlink(self.tmpfile)
    return response
def process_view(self, request, view, *args, **kwargs):
    """Profile the view call when ``profile`` appears in the query
    string and return the stats report as an HTML response; otherwise
    return None so Django calls the view normally.
    """
    for item in request.META['QUERY_STRING'].split('&'):
        if item.split('=')[0] == 'profile':  # profile in query string
            # catch the output, must happen before stats object is created
            # see https://bugs.launchpad.net/webpy/+bug/133080 for the details
            std_old, std_new = sys.stdout, StringIO.StringIO()
            sys.stdout = std_new
            # now let's do some profiling
            tmpfile = '/tmp/%s' % request.COOKIES['sessionid']
            prof = hotshot.Profile(tmpfile)
            # make a call to the actual view function with the given arguments
            # BUG FIX: the view kwargs (args[1]) must be unpacked with
            # **, not *; `*args[1]` iterated the dict's keys (and was a
            # syntax error on Python 2).
            response = prof.runcall(view, request, *args[0], **args[1])
            prof.close()
            # and then statistical reporting
            stats = hotshot.stats.load(tmpfile)
            stats.strip_dirs()
            stats.sort_stats('time')
            # do the output
            stats.print_stats(1.0)
            # restore default output
            sys.stdout = std_old
            # delete file
            os.remove(tmpfile)
            # BUG FIX: '<pre\>' contained a stray backslash that leaked
            # into the generated HTML.
            return HttpResponse('<pre>%s</pre>' % std_new.getvalue())
    return None
def report(self, stream):
    """Write the profiler report to *stream*.

    pstats stream handling differs by Python version, so output is
    redirected through either ``stats.stream`` or ``sys.stdout`` and
    restored in a finally block.
    """
    log.debug('printing profiler report')
    self.prof.close()
    stats = hotshot.stats.load(self.pfile)
    stats.sort_stats(self.sort)
    # 2.5 has completely different stream handling from 2.4 and earlier.
    # Before 2.5, stats objects have no stream attribute; in 2.5 and later
    # a reference sys.stdout is stored before we can tweak it.
    compat_25 = hasattr(stats, 'stream')
    if compat_25:
        tmp = stats.stream
        stats.stream = stream
    else:
        tmp = sys.stdout
        sys.stdout = stream
    try:
        if self.restrict:
            log.debug('setting profiler restriction to %s', self.restrict)
            stats.print_stats(*self.restrict)
        else:
            stats.print_stats()
    finally:
        # Always restore whichever stream we replaced.
        if compat_25:
            stats.stream = tmp
        else:
            sys.stdout = tmp
def process_response(self, request, response):
    """Build a fresh response containing profiler stats and a MySQL
    query count for the request.

    NOTE(review): the original *response* is discarded and replaced by
    a new empty HttpResponse before stats are attached — verify this is
    intended.
    """
    if (settings.DEBUG or request.user.is_superuser) and 'prof' in request.REQUEST:
        self.prof.close()
        out = StringIO.StringIO()
        old_stdout = sys.stdout
        sys.stdout = out
        stats = hotshot.stats.load(self.tmpfile)
        stats.sort_stats('time', 'calls')
        stats.print_stats()
        sys.stdout = old_stdout
        stats_str = out.getvalue()
        # Queries executed during the request, minus the baseline count
        # and the status query itself.
        self.q_all = self.mysql_stat() - self.q_before - 1
        response = HttpResponse()
        if stats_str:
            response.content = "<pre>" + stats_str + "</pre>"
            # Truncate to the first 40 lines of the report.
            response.content = "\n".join(response.content.split("\n")[:40])
            response.content += self.summary_for_files(stats_str)
            if self.q_all > -1:
                response.content += u"<pre>-----MySQL stats-----\n\nКоличество запросов к базе: %s \n\n</pre>"%self.q_all
        os.unlink(self.tmpfile)
    return response
def process_response(self, request, response):
    """Append pstats output to the response, wrapped in a <pre> block.

    ``prof_strip`` strips directory prefixes from file names;
    ``prof_sort`` selects comma-separated sort fields (default:
    time, calls).
    """
    if self.show_profile(request):
        profile_stats = self.stats()
        if 'prof_strip' in request.GET:
            profile_stats.strip_dirs()
        if 'prof_sort' in request.GET:
            profile_stats.sort_stats(*request.GET['prof_sort'].split(','))
        else:
            profile_stats.sort_stats('time', 'calls')
        # print_stats() writes to stdout, so swap it out temporarily.
        saved_stdout = sys.stdout
        capture = StringIO()
        sys.stdout = capture
        profile_stats.print_stats()
        report = capture.getvalue()
        sys.stdout.close()
        sys.stdout = saved_stdout
        # Wrap the report in a PRE block for the browser.
        if response and response.content and report:
            response.content = "<pre>" + report + "</pre>"
    return response
def main():
    """Command-line entry point: parse options, validate the argument
    list and post the given files/directories via ``PostMangler``,
    optionally under the hotshot profiler."""
    # Parse our command line options
    parser = OptionParser(usage='usage: %prog [options] dir1 dir2 ... dirN')
    parser.add_option('-c', '--config',
        dest='config',
        help='Specify a different config file location',
    )
    parser.add_option('-f', '--files',
        dest='files',
        help='Assume all arguments are filenames instead of directories, and use SUBJECT as the base subject',
        metavar='SUBJECT',
    )
    parser.add_option('-g', '--group',
        dest='group',
        help='Post to a different group than the default',
    )
    # parser.add_option('-p', '--par2',
    #     dest='generate_par2',
    #     action='store_true',
    #     default=False,
    #     help="Generate PAR2 files in the background if they don't exist already.",
    # )
    parser.add_option('-d', '--debug',
        dest='debug',
        action='store_true',
        default=False,
        help="Enable debug logging",
    )
    parser.add_option('--profile',
        dest='profile',
        action='store_true',
        default=False,
        help='Run with the hotshot profiler (measures execution time of functions)',
    )
    (options, args) = parser.parse_args()
    # No args? We have nothing to do!
    if not args:
        parser.print_help()
        sys.exit(1)
    # Make sure at least one of the args exists
    postme = []
    post_title = None
    if options.files:
        post_title = options.files
        for arg in args:
            if os.path.isfile(arg):
                postme.append(arg)
            else:
                print 'ERROR: "%s" does not exist or is not a file!' % (arg)
    else:
        for arg in args:
            if os.path.isdir(arg):
                postme.append(arg)
            else:
                print 'ERROR: "%s" does not exist or is not a file!' % (arg)
    if not postme:
        print 'ERROR: no valid arguments provided on command line!'
        sys.exit(1)
    # Parse our configuration file
    if options.config:
        conf = ParseConfig(options.config)
    else:
        conf = ParseConfig()
    # Make sure the group is ok
    if options.group:
        if '.' not in options.group:
            # Aliases (no dot) are resolved through the config file.
            newsgroup = conf['aliases'].get(options.group)
            if not newsgroup:
                print 'ERROR: group alias "%s" does not exist!' % (options.group)
                sys.exit(1)
        else:
            newsgroup = options.group
    else:
        newsgroup = conf['posting']['default_group']
    # Strip whitespace from the newsgroup list to obey RFC1036
    for c in (' \t'):
        newsgroup = newsgroup.replace(c, '')
    # And off we go
    poster = PostMangler(conf, options.debug)
    if options.profile:
        import hotshot
        prof = hotshot.Profile('profile.poster')
        prof.runcall(poster.post, newsgroup, postme, post_title=post_title)
        prof.close()
        import hotshot.stats
        stats = hotshot.stats.load('profile.poster')
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(25)
    else:
        poster.post(newsgroup, postme, post_title=post_title)
# Top-level dispatch: run main() directly, or under the hotshot/cProfile
# profilers, or with psyco, depending on the first CLI argument.
av = sys.argv[1:]
# NOTE(review): with no arguments main(av) runs, but execution then
# falls through to av[0] below, which raises IndexError — presumably
# main() exits the process; verify.
if not av:
    main(av)
firstarg = av[0].lower()
if firstarg == "hotshot":
    import hotshot, hotshot.stats
    av = av[1:]
    prof_log_name = "XXXX.prof"
    prof = hotshot.Profile(prof_log_name)
    # benchtime, result = prof.runcall(main, *av)
    result = prof.runcall(main, *(av, ))
    print("result", repr(result))
    prof.close()
    stats = hotshot.stats.load(prof_log_name)
    stats.strip_dirs()
    stats.sort_stats('time', 'calls')
    stats.print_stats(20)
elif firstarg == "profile":
    import cProfile
    av = av[1:]
    cProfile.run('main(av)', 'YYYY.prof')
    import pstats
    p = pstats.Stats('YYYY.prof')
    p.strip_dirs().sort_stats('cumulative').print_stats(30)
elif firstarg == "psyco":
    PSYCO = 1
    main(av[1:])
else:
    main(av)
def parse():
    """CLI driver: parse an HTML document (file, URL or stdin) with the
    configured tree builder, optionally profiling or timing the run."""
    optParser = getOptParser()
    opts,args = optParser.parse_args()
    encoding = None
    try:
        f = args[-1]
        # Try opening from the internet
        if f.startswith('http://'):
            try:
                import urllib, cgi
                f = urllib.urlopen(f)
                contentType = f.headers.get('content-type')
                if contentType:
                    (mediaType, params) = cgi.parse_header(contentType)
                    encoding = params.get('charset')
            except:
                pass
        elif f == '-':
            f = sys.stdin
        else:
            try:
                # Try opening from file system
                f = open(f)
            except IOError:
                pass
    except IndexError:
        sys.stderr.write("No filename provided. Use -h for help\n")
        sys.exit(1)
    treebuilder = treebuilders.getTreeBuilder(opts.treebuilder)
    if opts.sanitize:
        tokenizer = sanitizer.HTMLSanitizer
    else:
        tokenizer = HTMLTokenizer
    p = html5parser.HTMLParser(tree=treebuilder, tokenizer=tokenizer)
    if opts.fragment:
        parseMethod = p.parseFragment
    else:
        parseMethod = p.parse
    if opts.profile:
        #XXX should import cProfile instead and use that
        import hotshot
        import hotshot.stats
        prof = hotshot.Profile('stats.prof')
        prof.runcall(parseMethod, f, encoding=encoding)
        prof.close()
        # XXX - We should use a temp file here
        stats = hotshot.stats.load('stats.prof')
        stats.strip_dirs()
        stats.sort_stats('time')
        stats.print_stats()
    elif opts.time:
        import time
        t0 = time.time()
        document = parseMethod(f, encoding=encoding)
        t1 = time.time()
        printOutput(p, document, opts)
        t2 = time.time()
        sys.stderr.write("\n\nRun took: %fs (plus %fs to print the output)"%(t1-t0, t2-t1))
    else:
        document = parseMethod(f, encoding=encoding)
        printOutput(p, document, opts)
create_canvas(c) # state.subscribers.add(print_handler) # Start the main application create_window(c, "View created after") Gtk.main() if __name__ == "__main__": import sys if "-p" in sys.argv: print("Profiling...") import hotshot, hotshot.stats prof = hotshot.Profile("demo-gaphas.prof") prof.runcall(main) prof.close() stats = hotshot.stats.load("demo-gaphas.prof") stats.strip_dirs() stats.sort_stats("time", "calls") stats.print_stats(20) else: main() # vim: sw=4:et:
# (Fragment: this `else` pairs with an `if` above the visible chunk;
# the trailing `elif` is continued below it.)
else:
    sys.stderr.write("Unknown action `%s`, aborting.\n" % cmdline.action)
    sys.exit(1)

# try to print profiling information, but ignore failures
# (profiling might not have been requested, in the first place...)
try:
    pf.stop()
    logging.debug("Stopped call profiling, now dumping stats.")
    pf.close()
    import hotshot.stats
    stats = hotshot.stats.load(__name__ + '.pf')
    stats.strip_dirs()
    stats.sort_stats('cumulative', 'calls')
    stats.print_stats(50)
except:
    pass

# print CPU time usage
cputime_s = resource.getrusage(resource.RUSAGE_SELF)[0]
seconds = cputime_s % 60
minutes = int(cputime_s / 60) % 60
hours = int(cputime_s / 3600) % 24
days = int(cputime_s / 86400)
# Build a human-readable elapsed-time string, omitting zero units.
if days > 0:
    elapsed = "%d days, %d hours, %d minutes and %2.3f seconds" % (days, hours, minutes, seconds)
elif hours > 0:
    elapsed = "%d hours, %d minutes and %2.3f seconds" % (hours, minutes, seconds)
elif minutes > 0:
            # (Fragment: continuation of a call started above this chunk.)
            'wd': opt.wd,
            'momentum': opt.momentum,
            'multi_precision': True },
        initializer=mx.init.Xavier(magnitude=2))
        mod.save_parameters('image-classifier-%s-%d-final.params' % (opt.model, opt.epochs))
    else:
        if opt.mode == 'hybrid':
            net.hybridize()
        train(opt, context)
    if opt.builtin_profiler > 0:
        profiler.set_state('stop')
        print(profiler.dumps())

if __name__ == '__main__':
    # Optionally run the whole job under the hotshot profiler.
    if opt.profile:
        import hotshot, hotshot.stats
        prof = hotshot.Profile('image-classifier-%s-%s.prof' % (opt.model, opt.mode))
        prof.runcall(main)
        prof.close()
        stats = hotshot.stats.load('image-classifier-%s-%s.prof' % (opt.model, opt.mode))
        stats.strip_dirs()
        stats.sort_stats('cumtime', 'calls')
        stats.print_stats()
    else:
        main()
def main():
    """Extract translation rules from a word-aligned parallel corpus and
    dump them via RuleDumper, optionally profiling with hotshot."""
    import gc
    gc.set_threshold(100000, 10, 10) # this makes a huge speed difference
    #gc.set_debug(gc.DEBUG_STATS)
    input_file = open(FLAGS.parallel_corpus[2])
    if FLAGS.hypergraph is not None:
        try:
            os.mkdir(FLAGS.hypergraph)
        except OSError:
            sys.stderr.write("warning: directory %s already exists\n" % FLAGS.hypergraph)
    ffilename = FLAGS.parallel_corpus[0]
    efilename = FLAGS.parallel_corpus[1]
    ffile = open(ffilename)
    efile = open(efilename)
    if FLAGS.weightfiles is not None:
        fweightfile, eweightfile = FLAGS.weightfiles
    else:
        fweightfile = None
        eweightfile = None
    lexical_weighter = LexicalWeighter(fweightfile, eweightfile)
    maxlen = FLAGS.maxlen
    maxabslen = FLAGS.maxabslen
    tight_phrases = FLAGS.tight
    prev_time = start_time = time.time()
    slice = 1000
    if profile:
        prof = hotshot.Profile("extractor.prof")
        prof.start()
    if logger.level >= 1:
        sys.stderr.write("(2) Extracting rules\n")
    count = 1
    realcount = 0
    slice = 1000
    if FLAGS.pharaoh:
        alignments = alignment.Alignment.reader_pharaoh(ffile, efile, input_file)
    else:
        alignments = alignment.Alignment.reader(input_file) # bug: ignores -W option
    rule_dumper = RuleDumper()
    for i, a in enumerate(select(alignments), 1):
        a.lineno = count
        if logger.level >= 2:
            a.write(logger.file)
            a.write_visual(logger.file)
        etree = None
        # done reading all input lines
        realcount += 1
        extractor = Extractor(maxabslen, maxlen, FLAGS.minhole, FLAGS.maxvars, FLAGS.forbid_adjacent, FLAGS.require_aligned_terminal, tight_phrases, FLAGS.remove_overlaps, lexical_weighter, FLAGS.keep_word_alignments, etree, FLAGS.etree_labels)
        rules = extractor.extract_rules(a)
        if logger.level >= 3:
            sys.stderr.write("Rules:\n")
            rules = list(rules)
            for r in rules:
                sys.stderr.write("%d ||| %s\n" % (realcount, r))
        if False:
            rules = list(rules)
            for r in rules:
                sys.stderr.write("%d ||| %s ||| %f %f\n" % (realcount - 1, r, r.scores[1] / r.scores[0], r.scores[2] / r.scores[0]))
        #logger.writeln('%s rules extracted from sent %s' % (len(rules), i))
        rule_dumper.add(rules)
        # Periodic progress report every `slice` sentences.
        if logger.level >= 1 and count % slice == 0:
            sys.stderr.write("time: %f, sentences in: %d (%.1f/sec), " % (time.time() - start_time, count, slice / (time.time() - prev_time)))
            sys.stderr.write("rules out: %d+%d\n" % (rule_dumper.dumped, len(rule_dumper.gram)))
            prev_time = time.time()
        count += 1
    rule_dumper.dump()
    if profile:
        prof.stop()
        prof.close()
        stats = hotshot.stats.load("extractor.prof")
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(100)
class HTMLParser(object):
    """ Fake parser to test tokenizer output """

    def parse(self, stream, output=True):
        """Tokenize *stream*; print each token when *output* is true."""
        tokenizer = HTMLTokenizer(stream)
        for token in tokenizer:
            if output:
                print(token)

if __name__ == "__main__":
    x = HTMLParser()
    if len(sys.argv) > 1:
        if len(sys.argv) > 2:
            # A second argument selects profiling: run under hotshot and
            # print the stats instead of the tokens.
            import hotshot, hotshot.stats
            prof = hotshot.Profile('stats.prof')
            prof.runcall(x.parse, sys.argv[1], False)
            prof.close()
            stats = hotshot.stats.load('stats.prof')
            stats.strip_dirs()
            stats.sort_stats('time')
            stats.print_stats()
        else:
            x.parse(sys.argv[1])
    else:
        print("""Usage: python mockParser.py filename [stats]
If stats is specified the hotshots profiler will run and output the
stats instead.
""")
def main(): (options, args) = parseCmdLineOption() # Make sure at least one of the args exists postme = [] post_title = None if options.files: post_title = options.files for arg in args: if os.path.isfile(arg): postme.append(arg) else: print('ERROR: "%s" does not exist or is not a file!' % (arg)) else: for arg in args: if os.path.isdir(arg): postme.append(arg) else: print('ERROR: "%s" does not exist or is not a file!' % (arg)) if not postme: print('ERROR: no valid arguments provided on command line!') sys.exit(1) # Parse our configuration file if options.config: conf = ParseConfig(options.config) else: conf = ParseConfig() # Make sure the group is ok if options.group: if '.' not in options.group: newsgroup = conf['aliases'].get(options.group) if not newsgroup: print('ERROR: group alias "%s" does not exist!' % (options.group)) sys.exit(1) else: newsgroup = options.group else: newsgroup = conf['posting']['default_group'] # Strip whitespace from the newsgroup list to obey RFC1036 for c in (' \t'): newsgroup = newsgroup.replace(c, '') # And off we go poster = PostMangler(conf, options.debug) if options.profile: # TODO: replace by cProfile (PY3 compatibility) import hotshot prof = hotshot.Profile('profile.poster') prof.runcall(poster.post, newsgroup, postme, post_title=post_title) prof.close() import hotshot.stats stats = hotshot.stats.load('profile.poster') stats.strip_dirs() stats.sort_stats('time', 'calls') stats.print_stats(25) else: poster.post(newsgroup, postme, post_title=post_title)
def process_view(self, request, callback, args, kwargs):
    """Profile the view call and append the formatted profiler stats to
    the response body as an HTML/XML or Javascript comment.

    Returns the (possibly modified) response. 3xx responses and
    unsupported Content-Types are returned untouched.
    """
    # cProfile replaces hotshot (removed in Python 3) and keeps its stats
    # in memory, so no temporary file is needed — which also removes the
    # insecure, deprecated tempfile.mktemp() call.
    import cProfile
    import pstats
    profiler = cProfile.Profile()

    # Profile the call of the view function.
    response = profiler.runcall(callback, request, *args, **kwargs)

    # If we have got a 3xx status code, further action needs to be taken
    # by the user agent in order to fulfill the request. So don't attach
    # any stats to the content, because the content is supposed to be
    # empty and is ignored by the user agent.
    if response.status_code // 100 == 3:
        return response

    # Detect the appropriate comment syntax based on the
    # Content-Type header.
    for regex, begin_comment, end_comment in COMMENT_SYNTAX:
        if regex.match(response['Content-Type'].split(';')[0].strip()):
            break
    else:
        # Unsupported Content-Type: return the unchanged response.
        return response

    # The response can hold an iterator that is executed when the content
    # property is accessed, so profile that access as well.
    content = profiler.runcall(response.__class__.content.fget, response)

    # Write the stats in a human readable format, respecting some
    # optional settings, into a StringIO object.
    stats = pstats.Stats(profiler, stream=StringIO())
    if getattr(settings, 'PROFILE_MIDDLEWARE_STRIP_DIRS', False):
        stats.strip_dirs()
    if getattr(settings, 'PROFILE_MIDDLEWARE_SORT', None):
        stats.sort_stats(*settings.PROFILE_MIDDLEWARE_SORT)
    stats.print_stats(
        *getattr(settings, 'PROFILE_MIDDLEWARE_RESTRICTIONS', []))

    # Construct the comment and attach it to the response content.
    comment = '\n%s\n\n%s\n\n%s\n' % (
        begin_comment, stats.stream.getvalue().strip(), end_comment)
    response.content = content + comment

    # If the Content-Length header is given, add the number of bytes we
    # have appended. If it is omitted or incorrect, it remains so in
    # order not to change the behaviour of the web server or user agent.
    if response.has_header('Content-Length'):
        response['Content-Length'] = int(
            response['Content-Length']) + len(comment)
    return response
def start(self):
    """Start the web UI: optionally daemonize, drop privileges, then run
    the server (under cProfile when --profile is given).
    """
    super(Web, self).start()

    # Daemonization steps taken from
    # http://www.faqs.org/faqs/unix-faq/programmer/faq/ Section 1.7
    if self.options.ensure_value("fork", None):
        # fork() so the parent can exit, returning control to the shell.
        if os.fork():
            os._exit(0)
        # setsid() to become a process group and session group leader.
        os.setsid()
        # fork() again so the parent (the session group leader) can exit.
        if os.fork():
            os._exit(0)
        # chdir() to ensure that our process doesn't keep any directory
        # in use that may prevent a filesystem unmount.
        import deluge.configmanager
        os.chdir(deluge.configmanager.get_config_dir())

    if self.options.pidfile:
        # BUG FIX: the file was opened "wb" while writing a str, which
        # raises TypeError on Python 3; also close the handle.
        with open(self.options.pidfile, "w") as pidfile:
            pidfile.write("%d\n" % os.getpid())

    if self.options.ensure_value("group", None):
        if not self.options.group.isdigit():
            import grp
            self.options.group = grp.getgrnam(self.options.group)[2]
        # BUG FIX: dropping the *group* requires setgid(), not setuid()
        # (which would have dropped the uid here and made the later
        # setuid() for the user fail). Must happen while still root.
        os.setgid(self.options.group)
    if self.options.ensure_value("user", None):
        if not self.options.user.isdigit():
            import pwd
            self.options.user = pwd.getpwnam(self.options.user)[2]
        os.setuid(self.options.user)

    import server
    self.__server = server.DelugeWeb()

    if self.options.base:
        self.server.base = self.options.base

    if self.options.port:
        self.server.port = self.options.port

    if self.options.ensure_value("ssl", None):
        self.server.https = self.options.ssl

    if self.options.profile:
        # hotshot was removed in Python 3; cProfile is the supported
        # profiler and pstats renders the same report.
        import cProfile
        profiler = cProfile.Profile()
        profile_path = deluge.configmanager.get_config_dir(
            "deluge-web.profile")
        profiler.enable()

    self.server.install_signal_handlers()
    self.server.start()

    if self.options.profile:
        profiler.disable()
        profiler.dump_stats(profile_path)
        import pstats
        stats = pstats.Stats(profile_path)
        stats.strip_dirs()
        stats.sort_stats("time", "calls")
        stats.print_stats(400)
def start_daemon():
    """Entry point for daemon script.

    Parses command-line options, validates config and log directories,
    optionally daemonizes (double fork), drops privileges, installs
    signal handlers and finally starts the MirrorDaemon main loop.
    """
    import mirror.common
    mirror.common.setup_translations()

    # Fix ?? problem if redirect `mirrord -h` to file or pipe to other command
    if not sys.stdout.isatty() and not mirror.common.is_python3():
        reload(sys)
        sys.setdefaultencoding('utf-8')

    # Setup the argument parser
    parser = OptionParser(usage="%prog [options]")
    parser.add_option("-v", "--version", action="callback",
                      callback=version_callback,
                      help=_("Show program's version number and exit"))
    parser.add_option("-D", "--do-not-daemonize", dest="donot",
                      help=_("Do not daemonize (default is daemonize)"),
                      action="store_true", default=False)
    parser.add_option("-c", "--config", dest="config",
                      help=_("Set the config location directory"),
                      action="store", type="str")
    parser.add_option("-P", "--pidfile", dest="pidfile",
                      help=_("Use pidfile to store process id"),
                      action="store", type="str")
    parser.add_option("-u", "--user", dest="user",
                      help=_("User to switch to. Need to start as root"),
                      action="store", type="str")
    parser.add_option("-g", "--group", dest="group",
                      help=_("Group to switch to. Need to start as root"),
                      action="store", type="str")
    parser.add_option("-l", "--logfile", dest="logfile",
                      help=_("Set the logfile location"),
                      action="store", type="str")
    parser.add_option("-L", "--loglevel", dest="loglevel",
                      help=_("Set the log level: none, info, warning, error, "
                             "critical, debug"),
                      action="store", type="str")
    parser.add_option(
        "-q", "--quiet", dest="quiet",
        help=_("Sets the log level to 'none', this is the same as `-L none`"),
        action="store_true", default=False)
    parser.add_option("-r", "--rotate-logs", dest="rotate_logs",
                      help=_("Rotate logfiles."),
                      action="store_true", default=False)
    parser.add_option("--profile", dest="profile",
                      help=_("Profiles the daemon"),
                      action="store_true", default=False)
    parser.add_option("-t", "--tasks", dest="list_tasks",
                      help=_("List current tasks in scheduler's queue"),
                      action="store_true", default=False)
    parser.add_option("-s", "--signal", dest="signal",
                      help=_("Send signal to mirrord: stop, reload"),
                      action="store", type="str")

    # Get the options and args from the OptionParser
    (options, args) = parser.parse_args()

    # One-shot console actions exit immediately with the action's status.
    if options.list_tasks:
        sys.exit(mirror.console.list_task_queue())
    if options.signal:
        sys.exit(mirror.console.signal_process(options.signal))

    if options.quiet:
        options.loglevel = "none"
    if not options.loglevel:
        options.loglevel = "info"

    # 'w' truncates the log on start; rotation keeps appending instead.
    logfile_mode = 'w'
    if options.rotate_logs:
        logfile_mode = 'a'

    import mirror.configmanager
    if options.config:
        if not mirror.configmanager.set_config_dir(options.config):
            write_stderr("There was an error setting the config dir! Exiting..")
            sys.exit(1)

    # Check if config dir exists
    config_dir = mirror.configmanager.get_config_dir()
    if not os.path.isdir(config_dir):
        write_stderr(
            _("Config dir does not exist: %s, please create and write your mirror.ini"
              ), config_dir)
        sys.exit(1)
    # Check if main config file exists
    config_file = mirror.configmanager.get_config_dir('mirror.ini')
    if not os.path.isfile(config_file):
        write_stderr(_("Config file does not exist: %s, please write one"),
                     config_file)
        sys.exit(1)

    # Sets the options.logfile to point to the default location
    def set_logfile():
        if not options.logfile:
            options.logfile = os.path.join(
                mirror.common.DEFAULT_MIRRORD_LOG_DIR, "mirrord.log")

    set_logfile()

    # Setup the logger
    try:
        log_dir = os.path.abspath(os.path.dirname(options.logfile))
        # Try to make the logfile's directory if it doesn't exist
        if not os.path.isdir(log_dir):
            os.makedirs(log_dir)
    # NOTE(review): bare except hides the real reason (e.g. PermissionError)
    # — TODO narrow to OSError.
    except:
        write_stderr(
            _("There was an error creating log dir: %s, you can create it manually and start again."
              ), log_dir)
        sys.exit(1)
    if not os.access(log_dir, os.W_OK):
        write_stderr(
            _("There was an error writing logs to log dir: %s, "
              "you can change it manually (chown or chmod ) and start again."),
            log_dir)
        sys.exit(1)
    task_log_dir = mirror.common.DEFAULT_TASK_LOG_DIR
    if not os.path.isdir(task_log_dir):
        write_stderr(
            _("Default task log dir does not exists: %s, you can create it manually and start again."
              ), task_log_dir)
        sys.exit(1)
    if not os.access(task_log_dir, os.W_OK):
        write_stderr(
            _("There was an error writing logs to log dir: %s, "
              "you can change it manually (chown or chmod) and start again."),
            task_log_dir)
        sys.exit(1)

    # Setup the logger; append instead of truncating an existing log file.
    if os.path.isfile(options.logfile):
        logfile_mode = 'a'
    mirror.log.setupLogger(level=options.loglevel,
                           filename=options.logfile,
                           filemode=logfile_mode)
    if options.donot:
        mirror.log.addStreamHandler(level=options.loglevel)

    # Writes out a pidfile if necessary
    # NOTE(review): "wb" + str write raises TypeError on Python 3 — should be
    # "w" (and the handle is never closed); confirm target Python version.
    def write_pidfile():
        if options.pidfile:
            open(options.pidfile, "wb").write("%s\n" % os.getpid())

    # If the do not daemonize is set, then we just skip the forking
    if not options.donot:
        if os.fork():
            # We've forked and this is now the parent process, so die!
            os._exit(0)
        os.setsid()
        # Do second fork
        if os.fork():
            os._exit(0)
        # Change to root directory
        os.chdir("/")

    # Write pid file before change gid and uid
    write_pidfile()

    # Drop group first, then user — setgid() needs the privileges that
    # setuid() gives up.
    if options.group:
        if not options.group.isdigit():
            import grp
            options.group = grp.getgrnam(options.group)[2]
        os.setgid(options.group)
    if options.user:
        if not options.user.isdigit():
            import pwd
            options.user = pwd.getpwnam(options.user)[2]
        os.setuid(options.user)

    # Redirect stdin, stdout, stderr to /dev/null ...
    # if mirrord is running as daemon
    if not options.donot:
        fp = open("/dev/null", 'r+')
        os.dup2(fp.fileno(), sys.stdin.fileno())
        os.dup2(fp.fileno(), sys.stdout.fileno())
        os.dup2(fp.fileno(), sys.stderr.fileno())
        fp.close()

    import logging
    log = logging.getLogger(__name__)

    try:
        mirror.common.check_mirrord_running(
            mirror.configmanager.get_config_dir("mirrord.pid"))
        # return fp to keep file not closed (by __exit__()), so the lock will not get released
        # we also write pid into it
        fp = mirror.common.lock_file(
            mirror.configmanager.get_config_dir("mirrord.pid"))
    except mirror.error.MirrordRunningError as e:
        log.error(e)
        log.error(
            "You cannot run multiple daemons with the same config directory set."
        )
        sys.exit(1)
    except Exception as e:
        log.exception(e)
        sys.exit(1)

    import mirror.handler
    signal.signal(signal.SIGTERM, mirror.handler.shutdown_handler)
    signal.signal(signal.SIGQUIT, mirror.handler.shutdown_handler)
    signal.signal(signal.SIGINT, mirror.handler.shutdown_handler)
    signal.signal(signal.SIGCHLD, mirror.handler.sigchld_handler)
    signal.signal(signal.SIGHUP, mirror.handler.reload_handler)

    # NOTE(review): hotshot was removed in Python 3 — TODO migrate this
    # profiling branch to cProfile/pstats.
    if options.profile:
        import hotshot
        hsp = hotshot.Profile(
            mirror.configmanager.get_config_dir("mirrord.profile"))
        hsp.start()
    try:
        log.info("Starting mirror daemon...")
        from mirror.daemon import MirrorDaemon
        daemon = MirrorDaemon(options, args)
        daemon.start()
    except Exception as e:
        log.exception(e)
        sys.exit(1)
    finally:
        # Dump the profile report even when the daemon exits via sys.exit().
        if options.profile:
            hsp.stop()
            hsp.close()
            import hotshot.stats
            stats = hotshot.stats.load(
                mirror.configmanager.get_config_dir("mirrord.profile"))
            stats.strip_dirs()
            stats.sort_stats("time", "calls")
            stats.print_stats(400)
def run(self):
    """distutils/setuptools command body: run Gaphor (or one of its test
    modes) against the freshly built tree.

    Exactly one mode runs, chosen in order: ``--command`` (exec a string),
    ``--doctest`` (doctest a module), ``--unittest`` (run a test file),
    ``--file`` (exec a script), else launch the Gaphor entry point,
    optionally under a profiler.
    """
    print('Starting Gaphor...')

    if self.model:
        print('Starting with model file', self.model)

    # Run prerequisite sub-commands (e.g. build) first.
    for cmd_name in self.get_sub_commands():
        self.run_command(cmd_name)

    # if self.build_lib not in sys.path:
    # sys.path.insert(0, self.build_lib)
    # os.environ['GAPHOR_DATADIR'] = os.path.abspath('data')
    if self.coverage:
        import coverage
        coverage.start()

    if self.command:
        print('Executing command: %s...' % self.command)
        # NOTE(review): exec of a user-supplied string — acceptable for a
        # dev-only command, but must never see untrusted input.
        exec (self.command)
    elif self.doctest:
        print('Running doctest cases in module: %s...' % self.doctest)
        import imp
        # use zope's one since it handles coverage right
        from zope.testing import doctest
        # Figure out the file:
        f = os.path.join(*self.doctest.split('.')) + '.py'
        fp = open(f)
        # Prepend module's package path to sys.path
        pkg = os.path.join(self.build_lib, *self.doctest.split('.')[:-1])
        # if pkg:
        # sys.path.insert(0, pkg)
        # print 'Added', pkg, 'to sys.path'
        # Load the module as local module (without package)
        test_module = imp.load_source(self.doctest.split('.')[-1], f, fp)
        failure, tests = doctest.testmod(test_module,
                                         name=self.doctest,
                                         optionflags=doctest.ELLIPSIS +
                                         doctest.NORMALIZE_WHITESPACE)
        if self.coverage:
            print()
            print('Coverage report:')
            coverage.report(f)
        # Non-zero exit status when any doctest failed.
        sys.exit(failure != 0)
    elif self.unittest:
        # Running a unit test is done by opening the unit test file
        # as a module and running the tests within that module.
        print('Running test cases in unittest file: %s...' % self.unittest)
        import imp, unittest
        fp = open(self.unittest)
        test_module = imp.load_source('gaphor_test', self.unittest, fp)
        test_suite = unittest.TestLoader().loadTestsFromModule(test_module)
        # test_suite = unittest.TestLoader().loadTestsFromName(self.unittest)
        test_runner = unittest.TextTestRunner(verbosity=self.verbosity)
        result = test_runner.run(test_suite)
        if self.coverage:
            print()
            print('Coverage report:')
            coverage.report(self.unittest)
        sys.exit(not result.wasSuccessful())
    elif self.file:
        print('Executing file: %s...' % self.file)
        dir, f = os.path.split(self.file)
        print('Extending PYTHONPATH with %s' % dir)
        # sys.path.append(dir)
        exec (compile(open(self.file).read(), self.file, 'exec'), {})
    else:
        print('Launching Gaphor...')
        # Strip our own command-line args so Gaphor sees a clean argv.
        del sys.argv[1:]
        starter = load_entry_point('gaphor==%s' %
                                   (self.distribution.get_version(),),
                                   'console_scripts', 'gaphor')
        if self.profile:
            print('Enabling profiling...')
            try:
                import cProfile
                import pstats
                prof = cProfile.Profile()
                prof.runcall(starter)
                prof.dump_stats('gaphor.prof')
                p = pstats.Stats('gaphor.prof')
                p.strip_dirs().sort_stats('time').print_stats(20)
            # NOTE(review): hotshot fallback is dead code on Python 3
            # (the module was removed); cProfile is always available there.
            except ImportError as ex:
                import hotshot, hotshot.stats
                prof = hotshot.Profile('gaphor.prof')
                prof.runcall(starter)
                prof.close()
                stats = hotshot.stats.load('gaphor.prof')
                stats.strip_dirs()
                stats.sort_stats('time', 'calls')
                stats.print_stats(20)
        else:
            starter()
def print_stats():
    """Load the profiler log from "profile.log" and print a report
    sorted by internal time, then call count."""
    profile_stats = hotshot.stats.load("profile.log")
    # pstats.Stats methods return self, so the pipeline can be chained.
    profile_stats.strip_dirs().sort_stats('time', 'calls').print_stats()