def _profile_test():
    # Runs _test() with profiling, storing results in "norm.profile".
    # To see a profile, run e.g.
    # python -c 'import pstats; pstats.Stats("norm.profile").strip_dirs().sort_stats("time").print_stats()' | less
    import cProfile
    import os.path
    cProfile.run('_test()', 'norm.profile')
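# In-process equivalent of the shell one-liner in the comment above (a
# sketch; assumes "norm.profile" was already written by a prior
# _profile_test() run):
import pstats

pstats.Stats('norm.profile').strip_dirs().sort_stats('time').print_stats(20)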
def main(argv=None, interactive=True):
    """
    Entry point for setup.py.

    Wrapper for a profiler if requested, otherwise just call run() directly.
    If profiling is enabled we disable interactivity as it would wait for
    user input and influence the statistics. However the -r option still
    works.
    """
    # catch and ignore a NumPy deprecation warning
    with warnings.catch_warnings(record=True):
        warnings.filterwarnings(
            "ignore", 'The compiler package is deprecated and removed in '
            'Python 3.x.', DeprecationWarning)
        np.safe_eval('1')

    if '-p' in sys.argv or '--profile' in sys.argv:
        try:
            import cProfile as Profile
        except ImportError:
            import Profile
        Profile.run('from obspy.scripts.runtests import run; run()',
                    'obspy.pstats')
        import pstats
        stats = pstats.Stats('obspy.pstats')
        print()
        print("Profiling:")
        stats.sort_stats('cumulative').print_stats('obspy.', 20)
        print(PSTATS_HELP)
    else:
        errors = run(argv, interactive)
        if errors:
            sys.exit(1)
def profile_without_initializing_rhymechecker():
    # N.B. these don't work as-is; just copy/paste them into the
    # `if __name__ == '__main__'` block.
    import cProfile
    import pstats
    r = RhymeChecker()
    cProfile.run("poem_ex_nihilo(rhyme_checker=r, format='haiku', "
                 "input_text=None, be_random=False)",
                 'binaryprofile.prof', 'tottime')
    pstats.Stats('binaryprofile.prof').sort_stats('tottime').print_stats(.1)
def timeit():
    f = 'core(osgrubber().returnall()).returnall()'
    import cProfile
    cProfile.run(f)
    import timeit
    t = timeit.Timer(f, 'from __main__ import osgrubber, core')
    print(t.timeit(number=100))
def main():
    # 'main' method to run script from command line
    start = time.time()
    args = get_args()
    verbose = args['verbose']
    if verbose:
        print "Starting prepareThresholds.py"
    # validate arguments
    if args['ztotal'] < 1 or args['zstart'] < 1:
        raise ValueError("Invalid zstart or ztotal option.")
    parserExtra = ArgParserExtra(args)
    args = parserExtra.validateInputs(['egt', 'config'])
    args = parserExtra.validateOutputDir()
    profile = parserExtra.enableProfile()
    if profile:
        pstats = NamedTemporaryFile(prefix="prepareThresholds_",
                                    suffix=".pstats", dir=args['out'],
                                    delete=False).name
        cmd = "ThresholdFinder('" + args['egt'] + "', '" + args['config'] + \
              "').runMultiple(%s, %s, '%s', %s, %s)" % \
              (args['zstart'], args['ztotal'], args['out'],
               args['verbose'], args['force'])
        cProfile.run(cmd, pstats)
    else:
        tf = ThresholdFinder(args['egt'], args['config'])
        tf.runMultiple(args['zstart'], args['ztotal'], args['out'],
                       args['verbose'], args['force'])
    duration = time.time() - start
    if verbose:
        print "Finished. Duration:", round(duration, 1), "s"
def open_file(fn=None):
    from backends.perf import detect_perf
    from backends.ftrace import detect_ftrace
    from backends.dummy import detect_dummy
    from backends.trace_cmd import detect_tracecmd
    if fn is None:
        fn = open_dialog()
    if fn is None:
        return 0
    parser = None
    for func in detect_ftrace, detect_perf, detect_tracecmd, detect_dummy:
        parser = func(fn)
        if parser:
            break
    proj = None
    if prof:
        import cProfile
        # Use runctx with an explicit namespace: cProfile.run() would
        # evaluate the statement in __main__, where the locals `parser`
        # and `fn` are not visible.
        ns = {'parser': parser, 'fn': fn}
        cProfile.runctx('proj = parser(fn)', ns, ns, 'timechart_load.prof')
        proj = ns['proj']
    elif fn:
        proj = parser(fn)
    if proj:
        # Create and open the main window.
        window = tcWindow(project=proj)
        window._ui = window.edit_traits()
        # Traits has the bad habit of autoselecting the first row in the
        # table_editor. Work around this.
        proj.selected = []
        return 1
    return 0
def main():
    """Parse args and run main daemon function

    :return: None
    """
    parser = optparse.OptionParser(
        "%prog [options]", version="%prog " + __version__)
    parser.add_option('-c', '--config', dest="config_file",
                      metavar="INI-CONFIG-FILE", help='Config file')
    parser.add_option('-d', '--daemon', action='store_true', dest="is_daemon",
                      help="Run in daemon mode")
    parser.add_option('-r', '--replace', action='store_true', dest="do_replace",
                      help="Replace previous running poller")
    parser.add_option('--debugfile', dest='debug_file',
                      help=("Debug file. Default: not used "
                            "(why debug a bug free program? :) )"))
    parser.add_option("-p", "--profile", dest="profile",
                      help="Dump a profile file. Needs the Python cProfile library")
    opts, args = parser.parse_args()
    if args:
        parser.error("Does not accept any argument.")

    daemon = Poller(debug=opts.debug_file is not None, **opts.__dict__)
    if not opts.profile:
        daemon.main()
    else:
        # For perf tuning:
        import cProfile
        cProfile.run('daemon.main()', opts.profile)
def profile_leo():
    """Gather and print statistics about Leo"""
    import leo.core.leoGlobals as g  # imported first so the except branch below can use it
    # Work around a Python distro bug: can fail on Ubuntu.
    try:
        import pstats
    except ImportError:
        g.es_print('can not import pstats: this is a Python distro bug')
        g.es_print('https://bugs.launchpad.net/ubuntu/+source/python-defaults/+bug/123755')
        g.es_print('try installing pstats yourself')
        return
    import cProfile as profile
    import os
    theDir = os.getcwd()
    # On Windows, name must be a plain string. An apparent cProfile bug.
    name = str(g.os_path_normpath(g.os_path_join(theDir, 'leoProfile.txt')))
    print('profiling to %s' % name)
    profile.run('import leo ; leo.run()', name)
    p = pstats.Stats(name)
    p.strip_dirs()
    # p.sort_stats('module', 'calls', 'time', 'name')
    p.sort_stats('cumulative', 'time')
    # reFiles = 'leoAtFile.py:|leoFileCommands.py:|leoGlobals.py|leoNodes.py:'
    # p.print_stats(reFiles)
    p.print_stats()
def profile(code, name='profile_run', sort='cumulative', num=30):
    """Common-use wrapper for cProfile."""
    cProfile.run(code, name)
    stats = pstats.Stats(name)
    stats.sort_stats(sort)
    stats.print_stats(num)
    return stats
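# Hypothetical usage of the profile() helper above (`slow_sum` and the
# stats filename are illustrative, not from the source). Note that
# cProfile.run() evaluates its statement in the __main__ namespace, so
# the profiled target must be reachable from there.
import cProfile
import pstats


def slow_sum():
    return sum(i * i for i in range(10**6))


if __name__ == '__main__':
    stats = profile('slow_sum()', name='slow_sum.prof', sort='tottime', num=10)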
def main():
    n = 10**8
    pylist = range(n)

    # Basic verification that both methods have the same effect.
    # v1 = PyListToTIntV(pylist)
    # assert v1.Len() == n
    # v2 = SwigPyList(pylist)
    # assert v2.Len() == n
    # assert v1 == v2  # Doesn't appear to check
    # print "Converted %d values\n" % v2.Len()

    setup = """\
import snap
from __main__ import PyListToTIntV, SwigPyList
pylist = range(%d)
""" % n

    print "Method 1: Python-to-SNAP conversion:"
    print "-" * 50
    s1 = "v1 = PyListToTIntV(pylist)"
    # print timeit.timeit(setup=setup, stmt=s1, number=5)/5.0
    cProfile.run(setup + s1)

    print "Method 2: Python-to-SNAP conversion:"
    print "-" * 50
    s2 = "v2 = SwigPyList(pylist)"
    # print timeit.timeit(setup=setup, stmt=s2, number=5)/5.0
    cProfile.run(setup + s2)
def profile_features():
    utc_datetime = datetime.datetime.utcnow()
    # Use the formatted timestamp (the original discarded the strftime result
    # and fell back to str(utc_datetime)).
    timestamp = utc_datetime.strftime("%Y-%m-%d-%H%MZ")
    profile_log = 'profile_features_' + timestamp + ".prof"
    cProfile.run('run_foo()', profile_log)
def _profile():
    global result
    prepare()
    cProfile.run('run()', 'profile')
    p = pstats.Stats('profile')
    p.sort_stats('cumulative').print_stats(50)
    p.print_callees(20)
def main(interactive=True):
    """
    Entry point for setup.py.

    Wrapper for a profiler if requested, otherwise just call run() directly.
    If profiling is enabled we disable interactivity as it would wait for
    user input and influence the statistics. However the -r option still
    works.
    """
    if '-p' in sys.argv or '--profile' in sys.argv:
        try:
            import cProfile as Profile
        except ImportError:
            import Profile
        Profile.run('from obspy.core.scripts.runtests import run; run()',
                    'obspy.pstats')
        import pstats
        stats = pstats.Stats('obspy.pstats')
        print
        print "Profiling:"
        stats.sort_stats('cumulative').print_stats('obspy.', 20)
        print PSTATS_HELP
    else:
        errors = run(interactive)
        if errors:
            sys.exit(1)
def run(self, args=None):
    ns = self.parse_arguments(args)
    if ns.test:
        self.run_tests(ns.test)
        return
    if ns.infile is None:
        input_iter = raw_input_iter()
    else:
        import fileinput
        input_iter = fileinput.input(ns.infile, mode='rU')
    cases = test_case_iter(input_iter, self.lines_per_case)
    if ns.profile:
        import cProfile
        profile_file = None if ns.profile == 'stdout' else ns.profile
        # runctx keeps `self` and `cases` visible; cProfile.run() would
        # execute the statement in __main__ only.
        cProfile.runctx('self.execute_serially(enumerate(cases, 1))',
                        globals(), locals(), profile_file)
    else:
        if ns.threads > 1:
            self.threads = ns.threads
            executor = self.execute_threaded
        else:
            executor = self.execute_serially
        results = executor(enumerate(cases, 1))
        outfile = sys.stdout if ns.outfile is None else open(ns.outfile, 'w')
        with outfile:
            for i, r in enumerate(results, 1):
                self.print_result(i, r, outfile)
def main():
    path = '/tmp/stats'
    cProfile.run('run()', path)
    p = pstats.Stats(path)
    # p.sort_stats('time').print_stats(30)
    p.print_stats('get_numpy_data')
    import ipdb; ipdb.set_trace()
def RunWithProfiler(functionStr, outputpath=None):
    import cProfile
    import pstats
    import sys

    if outputpath is None:
        outputpath = "C:\\Temp"

    ProfilePath = os.path.join(outputpath, 'BuildProfile.pr')
    ProfileDir = os.path.dirname(ProfilePath)
    if not os.path.exists(ProfileDir):
        os.makedirs(ProfileDir)

    logger = logging.getLogger(__name__ + '.RunWithProfiler')
    logger.info("Profiling: " + functionStr)

    try:
        cProfile.run(functionStr, ProfilePath)
    finally:
        if not os.path.exists(ProfilePath):
            logger.error("No profile file found: " + ProfilePath)
            sys.exit()

    pr = pstats.Stats(ProfilePath)
    if pr is not None:
        pr.sort_stats('time')
        print(str(pr.print_stats(.1)))
        logger.info(str(pr.print_stats(0.1)))

    pr.print_callers(.1)
def main():
    cProfile.run(
        'apply2.aggregate('
        '[1, 2, 3, 4, 5, 2, 3, 4, 5, 2, 3, 4, 5, 2, 3, 4, 5, 2, 3, 4, 5], '
        '[1, 2, 3, 4, 5, 2, 3, 4, 5, 2, 3, 4, 5, 2, 3, 4, 5, 2, 3, 4, 5], '
        '"sum")',
        "profile.prof",
    )
    s = pstats.Stats("profile.prof")
    s.strip_dirs().sort_stats("time").print_stats()
def main():
    options, args = parser.parse_args()
    if options.mysql:
        kw = {}
        for arg in options.mysql:
            name, value = arg.split('=', 1)
            kw[name] = value
        kw['db'] = args[0]
        loader = MySQLStorage(**kw)
    else:
        dir = args[0]
        if not os.path.exists(dir):
            print 'Creating %s' % dir
            os.makedirs(dir)
        if options.stream:
            DatabaseConstructor = StreamDatabase
        else:
            DatabaseConstructor = Database

        def loader(name):
            return DatabaseConstructor(os.path.join(dir, name + '.db'))

    runner = Runner(db_count=options.db_count, read_portion=options.read,
                    large_read_portion=options.large_read, db_loader=loader)
    runner.preload(options.preload)
    if options.profile:
        import cProfile
        # runctx keeps the local `runner` visible; cProfile.run() would
        # execute the statement in __main__ only.
        cProfile.runctx('runner.run_many(options.reps, options.times)',
                        globals(), locals())
    else:
        runner.run_many(options.reps, options.times)
    runner.counter.summarize()
def profile(func, *args, **keywordArgs):
    """
    Profile a function call, if profiling is enabled.

    Change a normal function call f(a, b, c=3) into profile(f, a, b, c=3),
    and f will be profiled. A method call can also be profiled:
    o.f(a) becomes profile(o.f, a).

    Profiling is enabled by setting DO_PROFILE = True in utilities.debug.
    Otherwise, the function is called without profiling.

    Fancier schemes, like profiling the Nth call of a function, could be
    implemented here if desired.
    """
    global _profile_function
    global _profile_args
    global _profile_keywordArgs
    global DO_PROFILE

    _profile_function = func
    _profile_args = args
    _profile_keywordArgs = keywordArgs
    if DO_PROFILE:
        import cProfile
        print "Capturing profile..."
        cProfile.run(
            'from utilities.debug import _run_profile; _run_profile()',
            _profile_output_file)
        print "...end of profile capture"
    else:
        _run_profile()
    _profile_function = None
    _profile_args = None
    _profile_keywordArgs = None
def main():
    import sys

    # Possibly temp. addons path
    from os.path import join, dirname, normpath
    sys.path.append(normpath(join(dirname(__file__), "..", "..", "addons", "modules")))
    sys.path.append(join(utils.user_resource('SCRIPTS'), "addons", "modules"))

    # fake module to allow:
    #   from bpy.types import Panel
    sys.modules["bpy.app"] = app
    sys.modules["bpy.app.handlers"] = app.handlers
    sys.modules["bpy.app.translations"] = app.translations
    sys.modules["bpy.types"] = types

    #~ if "-d" in sys.argv:  # Enable this to measure start up speed
    if 0:
        import cProfile
        cProfile.run("import bpy; bpy.utils.load_scripts()", "blender.prof")
        import pstats
        p = pstats.Stats("blender.prof")
        p.sort_stats("cumulative").print_stats(100)
    else:
        utils.load_scripts()
def func_wrapper(*args, **kwargs):
    print 1
    # Profile the wrapped call. The original passed func.__name__ to
    # cProfile.run(), which only evaluates the bare name in __main__ and
    # never calls the function; runctx with an explicit namespace fixes
    # both problems.
    ns = {'func': func, 'args': args, 'kwargs': kwargs}
    cProfile.runctx('func(*args, **kwargs)', ns, ns, 'restats')
    p = pstats.Stats('restats')
    p.sort_stats('cumulative').print_stats(10)
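# A minimal, self-contained sketch of the decorator idea above, using
# cProfile.runctx() so the wrapped function and its arguments resolve;
# `profiled` and `work` are illustrative names, not from the source.
import cProfile
import functools
import pstats


def profiled(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Run the call in a private namespace and capture its result there.
        ns = {'func': func, 'args': args, 'kwargs': kwargs}
        cProfile.runctx('result = func(*args, **kwargs)', ns, ns, 'restats')
        p = pstats.Stats('restats')
        p.sort_stats('cumulative').print_stats(10)
        return ns['result']
    return wrapper


@profiled
def work(n):
    return sum(range(n))


if __name__ == '__main__':
    work(10**6)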
def worker(options):
    """
    Start background worker process.
    """
    workerPid = os.getpid()

    if not options.noaffinity:
        p = psutil.Process(workerPid)
        print "affinity [before]", p.cpu_affinity()
        p.cpu_affinity([options.cpuid])
        print "affinity [after]", p.cpu_affinity()

    factory = EchoServerFactory(options.wsuri)

    # The master already created the socket; just start listening and accepting.
    ##
    reactor.adoptStreamPort(options.fd, AF_INET, factory)

    if not options.silence:
        print "Worker started on PID %s using factory %s and protocol %s" % \
            (workerPid, factory, factory.protocol)
        # print "Worker %d PYPYLOG=%s" % (workerPid, os.environ.get('PYPYLOG', None))

    if options.profile:
        statprof.reset(PROFILER_FREQ)
        statprof.start()

    if not options.silence:
        def stat():
            if options.profile:
                statprof.stop()

            output = StringIO.StringIO()
            output.write("-" * 80 + "\n")
            output.write("Worker Statistics (PID %s)\n\n%s" %
                         (workerPid, factory.stats.stats()))

            if options.profile:
                output.write("\n")
                # format = statprof.DisplayFormats.ByLine
                # format = statprof.DisplayFormats.ByMethod
                # statprof.display(output, format = format)
                statprof.display(output)

            output.write("-" * 80 + "\n\n")
            sys.stdout.write(output.getvalue())

            if options.profile:
                statprof.reset(PROFILER_FREQ)
                statprof.start()

            reactor.callLater(options.interval, stat)

        reactor.callLater(options.interval, stat)

    if False:
        import cProfile
        print "RUNNING cProfile"
        cProfile.run('reactor.run()')
    else:
        reactor.run()
def main(top):
    top = os.path.abspath(top)
    if not exists(pj(top, "abipy")) or not exists(pj(top, "setup.py")):
        raise ValueError("top %s is not the top-level abipy directory" % top)
    os.chdir(top)
    sys.path.insert(0, top)

    import cProfile
    import pstats
    prof_file = pj(top, ".abipy_prof")

    # Profile the import of the package.
    cProfile.run('abipy_import()', prof_file)
    p = pstats.Stats(prof_file)

    # To see which functions loop a lot and take a lot of time:
    p.sort_stats('time').print_stats(10)

    # This would sort all the statistics by file name, then print statistics
    # for only the class init methods (since they have __init__ in them).
    # p.sort_stats('file').print_stats('__init__')
    return 0
def profile(argv=argv):
    from cProfile import run
    count, subtree, read_all = argv[1:]
    filename = '.'.join(argv)
    print(filename)
    run("from infi.wmpio.scripts import walk; walk(%s, %s, %s)" %
        (int(count), int(subtree), int(read_all)), filename)
def profile(run_this='generate_cases(1, 90, True, True)'):
    cProfile.run(run_this, 'foo.prof')
    print '-' * 80
    print '-' * 80
    print '-' * 80
    p = pstats.Stats('foo.prof')
    p.strip_dirs().sort_stats(-1).print_stats()
def profile(profiled):
    results = tempfile.mktemp(suffix='.out', prefix='pybot-profile',
                              dir=join(rootdir, 'tmp'))
    cProfile.run(profiled, results)
    stats = pstats.Stats(results)
    stats.sort_stats('cumulative').print_stats(50)
    os.remove(results)
def _func(*args, **kwargs):
    profile_file_path = os.path.join(os.path.dirname(__file__),
                                     '%s_data.txt' % func.__name__)

    # Inject positional arguments into __main__ so the statement passed to
    # profile.run() can see them.
    tuple_par_lst = []
    for par_index, par in enumerate(args):
        __main__.__dict__['par_%s' % par_index] = par
        tuple_par_lst.append('par_%s' % par_index)
    tuple_par_str = ','.join(tuple_par_lst)

    # Same for keyword arguments.
    kw_par_lst = []
    for key, value in kwargs.items():
        key_value_name = 'key_value_name_%s' % key
        __main__.__dict__[key_value_name] = value
        kw_par_lst.append("%s=%s" % (key, key_value_name))
    kw_par_str = ','.join(kw_par_lst)

    func_name = 'test_%s' % func.__name__
    __main__.__dict__[func_name] = func

    # Join only the non-empty argument strings; the original always emitted
    # "f(%s, %s)", which is a SyntaxError when there are no positional args.
    all_pars = ', '.join(s for s in (tuple_par_str, kw_par_str) if s)
    run_str = "%s(%s)" % (func_name, all_pars)
    profile.run(run_str, profile_file_path)

    statpf = open(os.path.join(os.path.dirname(__file__),
                               "%s_statics.txt" % func.__name__), 'w+')
    p = pstats.Stats(profile_file_path, stream=statpf)
    p.strip_dirs().sort_stats("cumulative").print_stats()
    statpf.close()
def profileRenderSampleScene_01(size=None, raytracer=None, world=None,
                                logger=None, outfile="simple1.bmp"):
    import cProfile
    # runctx makes the local arguments visible to the profiled statement;
    # cProfile.run() would execute it in __main__ only.
    cProfile.runctx('renderSampleScene_01(size, outfile)',
                    globals(), locals(), 'simple1_prof.txt')
def test_sort():
    def do_csort():
        global rv
        rv = sorted(test_list, shorter.sort)
        return rv

    def do_pysort():
        global rv

        def py_sorter(a, b):
            n = cmp(a.value, b.value)
            if n == 0:
                n = cmp(a.value2, b.value2)
            return n

        rv = sorted(test_list, py_sorter)

    # runctx keeps the local helpers visible to the profiled statements.
    cProfile.runctx('do_csort()', globals(), locals(), "Csort")
    p = pstats.Stats('Csort')
    p.strip_dirs().sort_stats(-1).print_stats()

    cProfile.runctx('do_pysort()', globals(), locals(), "Pysort")
    p = pstats.Stats('Pysort')
    p.strip_dirs().sort_stats(-1).print_stats()

    print rv[:10]
def run(self):
    app = BonsaiServer(self.context)
    opts, args = self.context.OptionParser().parse_args()
    filename = abspath(self.opts.pidFile)
    try:
        pid = getPidFromFile(filename)
        if checkIfPidRunning(pid):
            print "A process %s is already running using lock file %s. Nothing is done." % (pid, filename)
            sys.exit(0)
        else:
            print "Process %s died unexpectedly. Removing lockfile %s." % (pid, filename)
            os.unlink(filename)
    except IOError:
        # TODO: this should be a warning.
        g_Logger.trace("File %s does not exist. Will be created." % filename)

    open(filename, 'w').write(str(os.getpid()))
    self.context.addService(CmdLineArgs(self.context.OptionParser()))
    self.context.addService(Cfg())

    if opts.profile:
        import pstats
        try:
            import cProfile as profile
        except ImportError:
            import profile
        # runctx keeps the local `app` visible to the profiled statement.
        profile.runctx('app.start()', globals(), locals(), 'bonsaiProfiler')
        p = pstats.Stats('bonsaiProfiler')
        p.strip_dirs().sort_stats(-1).print_stats()
    else:
        app.start()
        ifb.save(opts.outfile)
        print("outfile = %s" % opts.outfile)
    except Exception as e:
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + repr(e) + "\n")
        sys.stderr.write(indent + "  for help use --help")
        return 2


if __name__ == "__main__":
    if DEBUG:
        sys.argv.append("-h")
    if TESTRUN:
        import doctest
        doctest.testmod()
    if PROFILE:
        import cProfile
        import pstats
        profile_filename = 'mschxudp_profile.txt'
        cProfile.run('main()', profile_filename)
        # pstats writes text, so open the stream in text mode (the original
        # used "wb").
        statsfile = open("profile_stats.txt", "w")
        p = pstats.Stats(profile_filename, stream=statsfile)
        stats = p.strip_dirs().sort_stats('cumulative')
        stats.print_stats()
        statsfile.close()
        sys.exit(0)
    sys.exit(main())
    import eos.events  # todo: move this to eos initialization?
    # noinspection PyUnresolvedReferences
    import service.prefetch  # noqa: F401

    # Make sure the saveddata db exists
    if not os.path.exists(config.savePath):
        os.mkdir(config.savePath)

    eos.db.saveddata_meta.create_all()

    from gui.mainFrame import MainFrame

    # set title if it wasn't supplied by argument
    if options.title is None:
        options.title = "pyfa %s - Python Fitting Assistant" % (config.getVersion())

    pyfa = wx.App(False)
    mf = MainFrame(options.title)
    ErrorHandler.SetParent(mf)

    if options.profile_path:
        profile_path = os.path.join(
            options.profile_path,
            'pyfa-{}.profile'.format(datetime.datetime.now().strftime('%Y%m%d_%H%M%S')))
        pyfalog.debug("Starting pyfa with a profiler, saving to {}".format(profile_path))
        import cProfile
        cProfile.run('pyfa.MainLoop()', profile_path)
    else:
        pyfa.MainLoop()

    # TODO: Add some thread cleanup code here. Right now we bail, and that
    # can lead to orphaned threads or threads not properly exiting.
    sys.exit()
def profile(filename_profile):
    import cProfile
    cProfile.run('run_mat_inv()', filename_profile)
    read_profile_result(filename_profile)
    o.fatal("exactly one filename expected")

drecurse_top = extra[0]
excluded_paths = parse_excludes(flags, o.fatal)
if not drecurse_top.startswith('/'):
    excluded_paths = [relpath(x) for x in excluded_paths]
exclude_rxs = parse_rx_excludes(flags, o.fatal)
it = drecurse.recursive_dirlist([drecurse_top], opt.xdev,
                                excluded_paths=excluded_paths,
                                exclude_rxs=exclude_rxs)
if opt.profile:
    import cProfile

    def do_it():
        for i in it:
            pass

    cProfile.run('do_it()')
else:
    if opt.quiet:
        for i in it:
            pass
    else:
        for (name, st) in it:
            print name

if saved_errors:
    log('WARNING: %d errors encountered.\n' % len(saved_errors))
    sys.exit(1)
logger.info('Get business days, excluding holidays')
bus_days = tsf.create_calendar_bus_days(start, end, cal='FX')
print(bus_days)

if False:
    logger.info('Remove out of hours')
    rng = pandas.date_range('01 Jan 2014', '05 Jan 2014', freq='1min')
    intraday_vals = pandas.DataFrame(data=pandas.np.random.randn(len(rng)), index=rng)

    intraday_vals = tsf.resample_time_series(intraday_vals, '60min')
    intraday_vals = tsf.remove_out_FX_out_of_hours(intraday_vals)
    print(intraday_vals)

if True:
    logger.info('Remove holiday days')
    rng = pandas.date_range('01 Jan 2007', '05 Jan 2014', freq='1min')
    intraday_vals = pandas.DataFrame(data=pandas.np.random.randn(len(rng)), index=rng)

    import cProfile
    cProfile.run("intraday_vals = tsf.filter_time_series_by_holidays(intraday_vals, 'FX')")
    print(intraday_vals)
av = sys.argv[1:]
if not av:
    main(av)
firstarg = av[0].lower()
if firstarg == "hotshot":
    import hotshot
    import hotshot.stats
    av = av[1:]
    prof_log_name = "XXXX.prof"
    prof = hotshot.Profile(prof_log_name)
    # benchtime, result = prof.runcall(main, *av)
    result = prof.runcall(main, *(av,))
    print("result", repr(result))
    prof.close()
    stats = hotshot.stats.load(prof_log_name)
    stats.strip_dirs()
    stats.sort_stats('time', 'calls')
    stats.print_stats(20)
elif firstarg == "profile":
    import cProfile
    av = av[1:]
    cProfile.run('main(av)', 'YYYY.prof')
    import pstats
    p = pstats.Stats('YYYY.prof')
    p.strip_dirs().sort_stats('cumulative').print_stats(30)
elif firstarg == "psyco":
    PSYCO = 1
    main(av[1:])
else:
    main(av)
from random import getrandbits  # needed by sauts(); import not shown in the original


class Counter():
    def __init__(self) -> None:
        self.data = 0

    def increase(self):
        self.data += 1
        return self.data

    def decrease(self):
        self.data -= 1
        return self.data


def sauts(n):
    p = Counter()
    return sum([(p.decrease() == 0 if getrandbits(1) == 0 else p.increase() == 0)
                for _ in range(n)])


# Commented-out timing variant kept as a string literal, as in the original:
"""
def test():
    from time import time
    start = time()
    sum([sauts(i) == 0 for i in range(10000)])
    print(time() - start)
"""


def test():
    return sauts(1000000000)


import cProfile
cProfile.run("test()")
Nx = 100
Ntheta = 1000
T = min(500, xmodule.modelx.model_obs.shape[0])
model_obs = xmodule.modelx.model_obs[0:T, :]
model_states = xmodule.modelx.model_states[0:T, :]
theta = xmodule.modelx.parameters.copy()
theta = theta.reshape(nbparameters, 1)
thetaparticles = repeat(theta, Ntheta).reshape(nbparameters, Ntheta)

print "parallel SIRs starting..."
import cProfile
cProfile.run("""
parallelSIRs = ParallelSIRs(Nx, thetaparticles, model_obs, xmodule.modelx,
                            saveLL = True, verbose = True)
parallelSIRs.first_step()
parallelSIRs.next_steps()
""", "prof")
print "done."

import pstats
p = pstats.Stats('prof')
p.sort_stats("time").print_stats(15)

SMCLogLikelihoods = parallelSIRs.getTotalLogLike()
results = {
    "SMCLL": SMCLogLikelihoods,
    "observations": model_obs,
    "allLL": parallelSIRs.allLL
}
resultsfolder = os.path.join(THISPATH, "results")
import numpy as np
import matplotlib.pyplot as plt
import skimage as sk
import skimage.io as io
from skimage import data as da
import skimage
import cv2
import base64
import cProfile
# from line_profiler import LineProfiler as lp

crun = run = lambda cmd: cProfile.run(cmd, sort='time')

from math import tan
from math import log
import math

array, arange = np.array, np.arange

random = lambda shape, maxx: (np.random.random(
    shape if (isinstance(shape, tuple) or isinstance(shape, list))
    else (shape, shape)) * maxx).astype(int)
normalizing = lambda a: (a.astype(float) - a.min()) / (a.max() - a.min())
stand = lambda num: max(min(255, int(round(num))), 0)
roundInt = lambda x: int(round(x))
sym = {FOREX_XAUUSD, FOREX_XAGUSD}
# mybt = MyBackTest('20160920', '20160930', sym, mode=BACK_TEST_MODE_ONTICK,
#                   chunk_size=TIME_STEP_1DAY)
mybt = MyBackTest('20150820', '20150920', sym, mode=BACK_TEST_MODE_ONBAR,
                  time_step=TIME_STEP_1MINUTE)
a = ExhibitionSubWindow(mybt)
mybt.start()
a.show()
ret = app.exec_()
finish = clock()
t = finish - start
print t
sys.exit(ret)


if __name__ == "__main__":
    import cProfile
    # Save the profiling results to a file; the raw dump is not
    # human-readable, so the pstats module is needed to analyze it.
    cProfile.run("test()", "result")

    import pstats
    # Create the Stats object.
    p = pstats.Stats("result")
    # Sort by cumulative running time and print.
    # print_stats(3) would print only the first 3 entries; a float argument
    # prints the top fraction of entries instead.
    p.strip_dirs().sort_stats("cumulative").print_stats()
                        nargs='?',
                        help='specific benchmarks to run')
    parser.add_argument('--profile',
                        dest='profile',
                        action='store_const',
                        const=True,
                        default=False,
                        help='run profiling against the report')
    parser.add_argument('--loops', '-l',
                        dest='loops',
                        default=100,
                        type=int,
                        help='Number of loops to run for each benchmark')
    args = parser.parse_args()

    benchmarks_to_run = list(benchmarks.values())
    if args.benchmarks:
        benchmarks_to_run = [
            benchmark for benchmark in benchmarks_to_run
            if benchmark.code in args.benchmarks
        ]

    print('\nRunning {0} benchmarks...\n'.format(len(benchmarks_to_run)))

    for benchmark in benchmarks_to_run:
        if args.profile:
            setup_data = benchmark.setup()
            cProfile.run('benchmark.lifter_version(setup_data)')
        else:
            benchmark.report(loops=args.loops)
import cProfile


def cycle():
    n = 32
    # '+ 1' so that the table is printed in full
    for i in range(((127 - 32) // 10) + 1):
        y = 0
        while y < 10 and n <= 127:
            print('num:', '%3d' % n, chr(n), end='|')
            n += 1
            y += 1
        print()


# Here we only pass in the character code
def recursion(num):
    if num <= 127:
        if num % 10 == 2 and num != 32:
            print()
        print('num:', '%3d' % num, chr(num), end='|')
        num += 1
        recursion(num)


# if __name__ == '__main__':
#     cycle()
#     recursion()

cProfile.run('cycle()')
num = 32
cProfile.run('recursion(num)')
    populate_data()
    create_indexes()


if __name__ == "__main__":
    global args
    import argparse
    import cProfile

    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--branch", default="HEAD",
                        help="git branch [HEAD]")
    parser.add_argument("-d", "--debug", action="store_true",
                        help="debugging output")
    parser.add_argument("-f", "--full", action="store_true",
                        help="git --full-tree")
    parser.add_argument("-p", "--profile", action="store_true",
                        help="profile output")
    args = parser.parse_args()

    if args.profile:
        cProfile.run('main_program()')
    else:
        main_program()
            break
        # give adv to player with one move to win
        elif (board[x] == board[y] == -1 and board[z] == 0) or \
             (board[x] == board[z] == -1 and board[y] == 0) or \
             (board[y] == board[z] == -1 and board[x] == 0):
            adv = -1
        elif (board[x] == board[y] == 1 and board[z] == 0) or \
             (board[x] == board[z] == 1 and board[y] == 0) or \
             (board[y] == board[z] == 1 and board[x] == 0):
            adv = 1
        # else give to player who went first
        else:
            adv = 1
    return win, adv


def main():
    # create empty board
    board = [0] * 10
    # run game, X goes first
    run(board, 1)


if __name__ == "__main__":
    cProfile.run('main()')
          str(locations))


def test_python_read_files():
    [open(fn, 'rb').read() for fn in filenames4]


if __name__ == "__main__":
    runs = [
        'datasets=test_full_read()',
        # 'test_partial()',
        # 'test_mem_read_full()',
        # 'test_mem_read_small()',
        'test_python_read_files()',
    ]
    random.shuffle(runs)
    for testrun in runs:
        cProfile.run(testrun, tempfile)
        p = pstats.Stats(tempfile)
        print("---------------")
        print(testrun)
        print("---------------")
        p.strip_dirs().sort_stats('time').print_stats(5)

    print("Confirming file read worked -- check for data elements near end")
    try:
        image_sizes = [len(ds.PixelData) for ds in datasets]  # NOQA
    except Exception as e:
        print("Failed to access dataset data for all files\nError:" + str(e))
    else:
        print("Reads checked ok.")

    # Clear disk cache for next run?
    if not on_windows:
        # ATT_TREES = read_informs_tree()
        pass
    else:
        DATA = read_adult(INPUT_DS_PATH)
        ATT_TREES = read_adult_tree()
    if FLAG == 'k':
        get_result_k(ATT_TREES, DATA, TYPE_ALG)
    elif FLAG == 'qi':
        get_result_qi(ATT_TREES, DATA, TYPE_ALG)
    elif FLAG == 'data':
        get_result_dataset(ATT_TREES, DATA, TYPE_ALG)
    elif FLAG == 'n':
        get_result_n(ATT_TREES, DATA, TYPE_ALG)
    elif FLAG == '':
        if __DEBUG:
            cProfile.run('get_result_one(ATT_TREES, DATA, TYPE_ALG)')
        else:
            get_result_one(ATT_TREES, DATA, TYPE_ALG)
    else:
        try:
            INPUT_K = int(FLAG)
            if LOG_TO_FILE == '1':
                sys.stdout = open("log/oka_results_k_" + str(INPUT_K) + ".log", "w")
            get_result_one(ATT_TREES, DATA, TYPE_ALG, INPUT_K, OUTPUT_DS_PATH)
        except ValueError:
            traceback.print_exc()
            print("Usage: python anonymizer [a | i] [knn | kmember | oka] [k | qi | data | n]")
            print("a: adult dataset, i: INFORMS dataset")
import cProfile
# The module must be imported so the statement below can resolve it in
# the __main__ namespace (import missing in the original).
import sprint_4_task_5

cProfile.run('sprint_4_task_5.main()')
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""

__version__ = "$Id$"
#end_pymotw_header

import cProfile as profile
import pstats
from profile_fibonacci_memoized import fib, fib_seq

# Create 5 sets of stats
filenames = []
for i in range(5):
    filename = 'profile_stats_%d.stats' % i
    profile.run('print %d, fib_seq(20)' % i, filename)

# Read all 5 stats files into a single object
stats = pstats.Stats('profile_stats_0.stats')
for i in range(1, 5):
    stats.add('profile_stats_%d.stats' % i)

# Clean up filenames for the report
stats.strip_dirs()

# Sort the statistics by the cumulative time spent in the function
stats.sort_stats('cumulative')

stats.print_stats()
    kwargs['X'] = X
    kwargs['regparam'] = '1'
    kwargs['kernel'] = 'GaussianKernel'
    kwargs['gamma'] = 2.**(-20.)
    kwargs['bias'] = 1
    kwargs['number_of_clusters'] = classcount
    kwargs['regparam'] = 2.**(-30.)
    # kwargs['learner'] = 'MMC'
    kwargs['basis_vectors'] = X[bvecs]
    tcb = TestCallback()
    kwargs['callback'] = tcb
    # mselector = None
    mmc = MMC(**kwargs)
    # mmc = SteepestDescentMMC.createLearner(**kwargs)
    # trainresults = core.trainModel(**kwargs)
    # model = trainresults['model']
    # writer.write_ints('./examples/predictions/clusters.txt',
    #                   trainresults['predicted_clusters_for_training_data'])


class Test(unittest.TestCase):

    def test_mmc(self):
        dotesting()


if __name__ == "__main__":
    import cProfile
    # dotesting()
    cProfile.run('dotesting(1500)')
        self.mnemosyne.review_controller().show_new_question()
        for i in range(200):
            self.mnemosyne.review_controller().show_answer()
            self.mnemosyne.review_controller().grade_answer(0)
        Client.__init__(self, "client_machine_id", self.mnemosyne.database(),
                        self.mnemosyne.main_widget())

    def do_sync(self):
        # self.BUFFER_SIZE = 10*8192
        # self.behind_proxy = True
        self.sync("localhost", 8186, "user", "pass")
        self.mnemosyne.database().save()


if __name__ == '__main__':
    client = MyClient()

    def sync():
        client.do_sync()

    tests = ["sync()"]
    for test in tests:
        cProfile.run(test, "mnemosyne_profile." + test.replace("()", ""))
        print
        print "*** ", test, " ***"
        print
        p = pstats.Stats('mnemosyne_profile.' + test.replace("()", ""))
        p.strip_dirs().sort_stats('cumulative').print_stats(number_of_calls)
E = 57000 * (4500)**0.5 * 144  # psf
t = 1           # ft
width = 10      # ft
height = 10     # ft
nu = 0.17
meshsize = 0.5  # ft
load = 250      # psf

myWall = RectWall(width, height, t, E, nu, meshsize,
                  'Fixed', 'Fixed', 'Fixed', 'Fixed')
myWall.add_load(0, height, load, load)

# Analyze the wall
import cProfile
cProfile.run('myWall.analyze()', sort='cumtime')

# Render the wall. The default load combination 'Combo 1' will be displayed
# since we're not specifying otherwise. The plates will be set to show the
# 'Mx' results.
from pynite import Visualization
Visualization.RenderModel(myWall.fem, text_height=meshsize / 6,
                          render_loads=True, color_map='Mx')

# Plot the displacement contour
myWall.plot_disp()

# Plot the shear force contours
myWall.plot_forces('Qx')
import cProfile
from setup_libraries import load_asm_math


def performance_test(n):
    result = 0
    for i in range(n):
        result += float(i) * 2
    return result


if __name__ == '__main__':
    asm_math = load_asm_math()
    n = 2500000

    python_result = performance_test(n)
    cpp_result = asm_math.performance_test(n)

    # print results
    print("performance_test({}) = {:.4f}".format(n, python_result))
    print("asm_math.performance_test({}) = {:.4f}".format(n, cpp_result))

    # profiling
    cProfile.run("performance_test(n)")
    cProfile.run("asm_math.performance_test(n)")
def main():
    # pass
    cProfile.run('SearchingAndSortingG.main()')
import cProfile
import palingrams_optimized

cProfile.run('palingrams_optimized.find_palingrams()')
    timer = Timer()
    with timer:
        for i in range(1000000):
            func(*args, **kwargs)
    return timer.duration_in_seconds()


if __name__ == '__main__':
    # timeit.timeit(stmt, setup, timer, number, globals)
    print(timeit.timeit(f1))
    print(timeit.timeit('f2(42)', 'from measure_time import f2'))
    print(timeit.timeit('f2(42)', globals=globals()))
    print(timeit.timeit(wrapper(f2, 42)))
    print(timeit.timeit(partial(f2, 42)))
    print(timeit.timeit(partial(f2, 42), number=2000000))
    print(measure_time(f2, 42))

    with measure_time_context():
        for i in range(1000000):
            f2(42)

    print(self_timer_context(f2, 42))

    cProfile.run('f2(42)')
    cProfile.run('[f2(42) for i in range(1000000)]')
    cProfile.run('[f2(42) for i in range(1000000)]', sort='tottime')
    profile.run('[f2(42) for i in range(1000000)]', sort='tottime')
    argv = FLAGS(sys.argv)

    from monitor import memory, human
    start_mem = memory()

    if FLAGS.fakemem:
        s = Model(FLAGS.fakemem)
        t = Model(FLAGS.fakemem)
        print >> logs, "memory usage after read in fake: ", human(memory(start_mem))

    if FLAGS.weights is None:
        if not FLAGS.sim:
            print >> logs, "Error: must specify a weights file" + str(FLAGS)
            sys.exit(1)
        else:
            model = None  # can simulate w/o a model
    else:
        model = Model(FLAGS.weights)  # FLAGS.model, FLAGS.weights

    if FLAGS.profile:
        import cProfile as profile
        profile.run('main()', '/tmp/a')
        import pstats
        p = pstats.Stats('/tmp/a')
        p.sort_stats('cumulative', 'time').print_stats(60)
    else:
        main()
"sqlmap_profile.png") if os.path.exists(profileOutputFile): os.remove(profileOutputFile) if os.path.exists(dotOutputFile): os.remove(dotOutputFile) if os.path.exists(imageOutputFile): os.remove(imageOutputFile) infoMsg = "profiling the execution into file %s" % profileOutputFile logger.info(infoMsg) # Start sqlmap main function and generate a raw profile file cProfile.run("start()", profileOutputFile) infoMsg = "converting profile data into a dot file '%s'" % dotOutputFile logger.info(infoMsg) # Create dot file by using extra/gprof2dot/gprof2dot.py # http://code.google.com/p/jrfonseca/wiki/Gprof2Dot dotFilePointer = codecs.open(dotOutputFile, 'wt', UNICODE_ENCODING) parser = gprof2dot.PstatsParser(profileOutputFile) profile = parser.parse() profile.prune(0.5 / 100.0, 0.1 / 100.0) dot = gprof2dot.DotWriter(dotFilePointer) dot.graph(profile, gprof2dot.TEMPERATURE_COLORMAP) dotFilePointer.close() infoMsg = "converting dot file into a graph image '%s'" % imageOutputFile
            # pygame.draw.lines(screen, THECOLORS["black"], False, [p1, p2])
        for i in dic:
            line = dic[i]
            body = line.body
            # print(list(line.a.rotated(body.angle)) + ["fhfjffjfjh"])
            pv1 = body.position + line.a.rotated(body.angle)
            pv2 = body.position + line.b.rotated(body.angle)
            p1 = pv1.x, flipy(pv1.y)
            p2 = pv2.x, flipy(pv2.y)
            pygame.draw.lines(screen, THECOLORS["lightgray"], False, [p1, p2])

        ### Flip screen
        pygame.display.flip()
        # clock.tick(50)
        pygame.display.set_caption("fps: " + str(clock.get_fps()))
        # time.sleep(0.1)


if __name__ == '__main__':
    doprof = 0
    if not doprof:
        main()
    else:
        import cProfile
        import pstats
        prof = cProfile.run("main()", "profile.prof")
        stats = pstats.Stats("profile.prof")
        stats.strip_dirs()
        stats.sort_stats('cumulative', 'time', 'calls')
        stats.print_stats(30)
# coding=utf-8
#
# ncalls: number of times the function was called
# tottime: total time spent in the function itself
# percall: average time per call, i.e. tottime/ncalls
# cumtime: time spent in the function and all of its subcalls,
#          from entering the function until it returns
# the second percall is cumtime divided by the number of primitive calls
# filename:lineno(function): where each function is defined
#
# console: python -m cProfile caption_5_io.py

import cProfile


def func():
    print("")


cProfile.run('func()')
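# A slightly richer, hypothetical example to make the tottime/cumtime
# distinction described above visible: outer() spends most of its cumulative
# time inside inner(), so inner() dominates tottime while outer() dominates
# cumtime. (`inner` and `outer` are illustrative names, not from the source.)
import cProfile


def inner():
    return sum(i * i for i in range(200000))


def outer():
    return [inner() for _ in range(5)]


cProfile.run('outer()', sort='cumulative')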
        142, 131, 51, 229, 9, 607, 326, 522, 687, 792, 845, 665, 358, 91,
        720, 155, 565, 99, 26, 650, 539, 780, 589, 950, 935, 372, 227, 424,
        750, 833, 554, 841, 552, 60, 757, 430, 916, 140, 790, 426, 776, 96,
        199, 923, 806, 949, 755, 711, 659, 911, 611, 310, 774, 265, 880,
        690, 706, 761, 286, 255, 756, 204, 444, 478, 601, 529, 669, 241,
        784, 566, 528, 208, 270, 511, 236, 271, 378, 58, 453, 467, 233, 250,
        567, 296, 932, 989, 367, 626, 35, 162, 887, 572, 603, 564, 797, 280,
        406, 970, 689, 408, 431, 638, 489, 85, 50, 357, 803, 47, 555, 793,
        422, 763, 110, 869, 861, 253, 320, 538, 347, 405, 769, 64, 875, 630,
        537, 328, 553, 166, 948, 303, 160, 800, 507, 920, 922, 90, 693, 636,
        17, 455, 183, 210, 856, 762, 656, 174, 873, 579, 176, 688, 640, 1,
        938, 902, 341, 740, 581, 427, 111, 972, 443, 22, 791, 304, 574, 575,
        725, 477, 700, 817, 381, 479, 248, 121, 411, 547, 182, 871, 599,
        203, 13, 224, 541, 724, 178, 775, 388, 4, 251, 321, 52, 88, 100,
        279, 614, 839, 84, 151, 735, 40, 752, 773, 376, 77, 476, 708, 396,
        988, 961, 24, 231, 445, 609, 952, 965, 986, 414, 451, 881, 42, 257,
        32, 334, 130, 596, 527, 94, 333, 317, 244, 960, 710, 852, 862, 421,
        81, 37, 452, 274, 187, 268, 520, 491, 778, 18, 743, 620, 145, 72,
        370, 118, 748, 633, 997, 436, 143, 573, 495, 180, 34
    ])
    print largestDivisibleSubset([
        1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192,
        16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152,
        4194304, 8388608, 16777216, 33554432, 67108864, 134217728,
        268435456, 536870912, 1073741824
    ])


import cProfile
cProfile.run('testlargestDivisibleSubset()')