Example #1
    def __call__(self, environ, start_response):
        query_params = urlparse.parse_qs(environ['QUERY_STRING'])

        if "profile" in query_params:
            import cProfile

            def fake_start_response(*args, **kwargs):
                pass

            try:
                tmpfile = tempfile.NamedTemporaryFile()

                cProfile.runctx("self.app(environ, fake_start_response)",
                                globals(),
                                locals(),
                                tmpfile.name)

                tmpfile.seek(0)

                start_response('200 OK', 
                               [('Content-Type', 'application/octet-stream'),
                                ('Content-Disposition',
                                 'attachment; filename=profile.bin')])
                return tmpfile.read()
            finally:
                tmpfile.close()

        return self.app(environ, start_response)
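
Example #1 streams back the raw binary dump produced by cProfile.runctx; a minimal sketch of inspecting such a download afterwards with pstats (the local filename profile.bin is only the name suggested by the Content-Disposition header above):

import pstats

stats = pstats.Stats('profile.bin')           # load the dump written by cProfile.runctx
stats.strip_dirs().sort_stats('cumulative')   # drop long paths, order by cumulative time
stats.print_stats(20)                         # show the 20 most expensive entries
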
Example #2
 def pytest_pyfunc_call(self, __multicall__, pyfuncitem):
     """Hook into pytest_pyfunc_call; marked as a tryfirst hook so that we
     can call everyone else inside `cProfile.runctx`.
     """
     prof = os.path.join("prof", pyfuncitem.name + ".prof")
     cProfile.runctx("fn()", globals(), dict(fn=__multicall__.execute), filename=prof)
     self.profs.append(prof)
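
Example #2 writes one prof/<testname>.prof dump per test; pstats can merge several of those files into a single report. A hedged sketch (only the prof/ directory comes from the hook above, everything else is assumed):

import glob
import pstats

dumps = glob.glob('prof/*.prof')              # every per-test dump written by the hook
if dumps:
    merged = pstats.Stats(dumps[0])
    for extra in dumps[1:]:
        merged.add(extra)                     # Stats.add() folds another dump into the totals
    merged.strip_dirs().sort_stats('cumulative').print_stats(30)
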
Example #3
def run(num=1, time=30, direct = 'profile_data/test/'):
    
    startup.init(False) #False = no splash screen
    #Startup has to happen before other imports that use resources.
    from core import player
    from core.game import game
    
    #Filenames
    field_file = DEF.FIELD_FILE_DIRECTORY + "_benchmark.utf"
    files = []
    FPS = []
    for n in xrange(0, num):
        
        #Setup the game data.
        player_descriptors = []
        player_descriptors.append(player.PlayerDescriptor(DEF.PTYPE_CPU,team=1))
        player_descriptors[0].name = "Player"
        for i in range(0,15):
            player_descriptors.append(player.PlayerDescriptor(DEF.PTYPE_CPU,team=i+2))
            player_descriptors[i+1].name = DEF.PLAYER_NAMES[i]

        #Run and profile.
        direct
        if not os.path.exists(direct):
            os.makedirs(direct)
        files.append(direct + PREFIX+ "_%d.prof" % n)
        GAME = game.Game(field_file, player_descriptors)
        cProfile.runctx("FPS.append(run_game(GAME, time))", globals(), {'GAME':GAME, 'FPS':FPS, 'time':time}, files[n])

    startup.close()
    return files, FPS
Example #4
def profile_command(
        reports="ProfileReporter",
        n=1, # How many times to run dexy for profiling.
        **kw # Accepts additional keyword arguments for the 'dexy' command
    ):
    """
    Runs dexy using cProfile to do time-based profiling. Uses ProfileReport
    (the only report enabled by default) to present profiling information.
    Other reports can be specified, report time is not included in profiling.
    Running ProfileReport each time ensures that profiling data is stored in
    sqlite database for comparison (a 'dexy reset' will delete this database).
    """
    dexy_fn = args.function_for(dexy.commands, "dexy")
    defaults = args.determine_kwargs(dexy_fn)
    defaults.update(kw)
    defaults['profile'] = True

    locals_for_run_dexy = {'args' : defaults}

    logs_dir = kw.has_key("logsdir") and kw['logsdir'] or Constants.DEFAULT_LDIR
    prof_file = os.path.join(logs_dir, "dexy.prof")

    report_kwargs = {}
    if kw.has_key('artifactclass'):
        report_kwargs['artifactclass'] = kw['artifactclass']

    for i in xrange(n):
        print "===== run %s of %s =====" % (i+1, n)
        cProfile.runctx("run_dexy(args)", globals(), copy.deepcopy(locals_for_run_dexy), prof_file)
        reports_command(reports=reports, **report_kwargs)
Example #5
def Test(fileName = None, period = 0.2, real = False,
         verbose = 0, profiling = False):
    """Module self test

    """
    console = getConsole(verbosity=console.Wordage[verbose])

    import building
    import tasking

    if not fileName:
        fileName = "../plan/box1.flo"

    console.terse( "Building ...")
    skedder = tasking.Skedder(name = "TestSkedder",
                              period = period,
                              real = real,
                              filepath = fileName)
    if skedder.build():
        console.terse( "\n\nStarting mission from file {0}...\n".format(fileName))
        if not profiling:
            skedder.run()
        else:
            cProfile.runctx('skedder.run()',globals(),locals(), './profiles/skedder')

            p = pstats.Stats('./profiles/skedder')
            p.sort_stats('time').print_stats()
            p.print_callers()
            p.print_callees()


    return skedder
Example #6
    def write(self, Writer, items, toc):
        writer = Writer(self.opts, self.log, cover_data=self.cover_data,
                toc=toc)
        writer.report_progress = self.report_progress

        close = False
        if not hasattr(self.output_path, 'write'):
            close = True
            if not os.path.exists(os.path.dirname(self.output_path)) and os.path.dirname(self.output_path) != '':
                os.makedirs(os.path.dirname(self.output_path))
            out_stream = open(self.output_path, 'wb')
        else:
            out_stream = self.output_path

        out_stream.seek(0)
        out_stream.truncate()
        self.log.debug('Rendering pages to PDF...')
        import time
        st = time.time()
        if False:
            import cProfile
            cProfile.runctx('writer.dump(items, out_stream, PDFMetadata(self.metadata))',
                        globals(), locals(), '/tmp/profile')
        else:
            writer.dump(items, out_stream, PDFMetadata(self.metadata))
        self.log('Rendered PDF in %g seconds:'%(time.time()-st))

        if close:
            out_stream.close()
Example #7
def player_profiled(
    rec_dir, ipc_pub_url, ipc_sub_url, ipc_push_url, user_dir, app_version
):
    import cProfile
    import subprocess
    import os
    from .player import player

    cProfile.runctx(
        "player(rec_dir, ipc_pub_url, ipc_sub_url, ipc_push_url, user_dir, app_version)",
        {
            "rec_dir": rec_dir,
            "ipc_pub_url": ipc_pub_url,
            "ipc_sub_url": ipc_sub_url,
            "ipc_push_url": ipc_push_url,
            "user_dir": user_dir,
            "app_version": app_version,
        },
        locals(),
        "player.pstats",
    )
    loc = os.path.abspath(__file__).rsplit("pupil_src", 1)
    gprof2dot_loc = os.path.join(loc[0], "pupil_src", "shared_modules", "gprof2dot.py")
    subprocess.call(
        "python "
        + gprof2dot_loc
        + " -f pstats player.pstats | dot -Tpng -o player_cpu_time.png",
        shell=True,
    )
    print(
        "created cpu time graph for world process. Please check out the png next to the player.py file"
    )
Example #8
def aunique_benchmark(lib, prof):
    def _build_tree():
        vfs.libtree(lib)

    # Measure path generation performance with %aunique{} included.
    lib.path_formats = [
        (library.PF_KEY_DEFAULT,
         Template('$albumartist/$album%aunique{}/$track $title')),
    ]
    if prof:
        cProfile.runctx('_build_tree()', {}, {'_build_tree': _build_tree},
                        'paths.withaunique.prof')
    else:
        interval = timeit.timeit(_build_tree, number=1)
        print('With %aunique:', interval)

    # And with %aunique replaced with a "cheap" no-op function.
    lib.path_formats = [
        (library.PF_KEY_DEFAULT,
         Template('$albumartist/$album%lower{}/$track $title')),
    ]
    if prof:
        cProfile.runctx('_build_tree()', {}, {'_build_tree': _build_tree},
                        'paths.withoutaunique.prof')
    else:
        interval = timeit.timeit(_build_tree, number=1)
        print('Without %aunique:', interval)
Example #9
    def run_jobs(self, queue):
        """Initialize a SOLUS session and run the jobs"""

        # Initialize the session
        try:
            session = SolusSession(self.user, self.passwd)
        except EnvironmentError as e:
            logging.critical(e)
            # Can't log in, therefore can't do any jobs
            # As long as at least 1 of the threads can log in,
            # the scraper will still work
            return

        # Run all the jobs in the job queue
        while True:
            try:
                job = queue.get_nowait()
            except Empty as e:
                return

            # Run the job
            if PROFILE:
                import cProfile
                cProfile.runctx("SolusScraper(session, job, self.db).start()", globals(), locals())
            else:
                SolusScraper(session, job, self.db).start()
Example #10
def run_function_for_ex_ids(f, name, ex_ids, timing_dir=TIMING_DIR,
                            profile_dir=PROFILE_DIR):
    """ repeatedly runs a function on each ex_id in a list.
    For each run, it stores timing data and a profiler binary file.

    :param f: the function that will be run with ex_id as the only input
    :param name: a string specifying the type of job being performed
    :param ex_ids: a list of ex_ids that will be used as inputs for function, f
    """
    # initialize file names for timing and profileing
    now_string = time.ctime().replace(' ', '_').replace(':', '.').strip()
    ensure_dir_exists(timing_dir)
    time_file = '{dir}/{type}_{now}_x{N}.json'.format(dir=timing_dir, type=name,
                                                      now=now_string, N=len(ex_ids))

    time_storage = {}
    ensure_dir_exists(profile_dir)
    for ex_id in ex_ids:
        print '{fun}ing {ei} starting at: {t}'.format(fun=name, ei=ex_id,
                                                      t=time.clock())
        profile_file = '{dir}/{type}_{id}_{now}.profile'.format(dir=profile_dir,
                                                                type=name,
                                                                now=now_string,
                                                                id=ex_id)
        time_storage[ex_id] = {'start': time.clock()}
        try:
            profile.runctx('f(ex_id)', globals(), locals(), filename=profile_file)
            time_storage[ex_id]['finish'] = time.clock()
            json.dump(time_storage, open(time_file, 'w'))
        except Exception as e:
            print 'Error with {name} at time {t}\n{err}'.format(name=name, t=time.clock(), err=e)
            logging.exception("HELTENA")
    return True
Example #11
def main():
    args = docopt(__doc__)
    if args["--debug"] is not False:
        print "Args: \n", args
    fixtureArg = args["--fixture"]
    if "::" not in fixtureArg:
        fixtureIni, fixtureSection = "~/.pacbio/data-fixtures.ini", fixtureArg
    else:
        fixtureIni, fixtureSection = fixtureArg.split("::")
    fixture = Fixture.fromIniFile(fixtureIni, fixtureSection)
    holeNumber = int(args["--hole"])

    if args["--headless"] is not False:
        banner = "Convenient variables available: zmw, fixture"
        zmw = fixture[holeNumber]
        try:
            from IPython import embed
            embed(banner1=banner)
        except ImportError:
            code.InteractiveConsole(locals=locals()).interact(banner=banner)
    else:
        app = QtGui.QApplication([])
        traceViewer = TraceViewer(fixture)
        traceViewer.setFocus(holeNumber)
        if args["--debug"]:
            debug_trace()
        if args["--profile"]:
            import cProfile
            cProfile.runctx("app.exec_()",
                            globals=globals(),
                            locals=locals())
        else:
            app.exec_()
Example #12
    def testC_Profile(self):
        """
        _Profile_

        DON'T RUN THIS!
        """

        return

        import cProfile, pstats

        myThread = threading.currentThread()

        name    = makeUUID()

        config = self.getConfig()

        jobList = self.createGiantJobSet(name = name, config = config,
                                         nSubs = 10, nJobs = 1000, nFiles = 10)

        cleanCouch = CleanCouchPoller(config = config)
        cleanCouch.setup()

        cProfile.runctx("cleanCouch.algorithm()", globals(), locals(), filename = "testStats.stat")

        p = pstats.Stats('testStats.stat')
        p.sort_stats('cumulative')
        p.print_stats()
        return
Example #13
 def run(self):
     for task in self.tasks:
         if not self.profiling:
             task.run()
         else:
             cProfile.runctx('task.run()', globals(), locals(), 'process_%s.out' % self.name)
     self.log.info("folding tentacle %s" % self.name)
Example #14
def profile(name, env, filename=None, verbose=False):
    if filename:
        filename = name + '-' + filename
        print('Profiling %s ==> %s' % (name, filename))

    else:
        filename = None

        title = name + ' profile'
        print()
        print('=' * len(title))
        print(title)
        print('=' * len(title))

    func = create_bench(name, env)

    gc.collect()
    code = 'for x in range(10000): func()'

    if verbose:
        if pprofile is None:
            print('pprofile not found. Please install pprofile and try again.')
            return

        pprofile.runctx(code, locals(), globals(), filename=filename)

    else:
        cProfile.runctx(code, locals(), globals(),
                        sort='tottime', filename=filename)
Example #15
def main():
    scriptname = 'analysis.py'
    tmpdir, profile = False, False
    for arg in sys.argv[1:]:
        if '.py' in arg: scriptname = arg
        if arg=='--tmpdir': tmpdir = True
        if arg=='--profile': profile = True
    if tmpdir:
        config.workdir = tempfile.mkdtemp()
    else:
        config.workdir = os.path.join(os.getcwd(), scriptname[:-3])
        config.workdir = os.path.realpath(config.workdir)    
    setup_workdir()
    config.theta_dir = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))
    config.report = html_report(os.path.join(config.workdir, 'index.html'))
    variables = globals()
    variables['report'] = config.report
    utils.info("executing script %s" % scriptname)
    try:
        if profile:
            import cProfile
            cProfile.runctx("execfile(scriptname, variables)", globals(), locals())
        else:
            execfile(scriptname, variables)
    except Exception as e:
        print "error while trying to execute analysis script %s:" % scriptname
        traceback.print_exc()
        sys.exit(1)
    utils.info("workdir is %s" % config.workdir)
Example #16
def __main__(argv):
	min_key_len = 1024

	if(len(argv) != 14):
		raise ValueError, "Usage: %s id key_len round_id n_nodes my_ip my_port leader_ip leader_port dnstr_ip dnstr_port upstr_ip upstr_port msg_len" % (argv[0])

	logger = logging.getLogger()
	logger.setLevel(logging.DEBUG)
	debug("Starting node")

	id = int(argv[1])
	key_len = int(argv[2])
	round_id = int(argv[3])
	n_nodes = int(argv[4])
	my_addr = (argv[5], int(argv[6]))
	leader_addr = (argv[7], int(argv[8]))
	up_addr = (argv[9], int(argv[10]))
	dn_addr = (argv[11], int(argv[12]))
	msg_len = int(argv[13])

	msg_file = AnonCrypto.random_file(msg_len)

	node = bulk_node.bulk_node(id, key_len, round_id, n_nodes,
			my_addr, leader_addr, up_addr, dn_addr, msg_file)
	cProfile.runctx('mynode.run_protocol()', {'mynode': node}, {})
#node.run_protocol()
	fnames = node.output_filenames()

	for i in xrange(0, len(fnames)):
		copyfile(fnames[i], "data/node%04d-%04d.out" % (id, i))

	return
Example #17
def profileRunExperiment(*args, **keywords):
  cProfile.runctx(
      "runExperiment(*args, **keywords)",
      globals=globals(),
      locals=dict(args=args, keywords=keywords),
      filename="re.profile"
    )
Example #18
def _profile(filename, fn, *args, **kw):
    gc.collect()

    profiler.runctx('result = fn(*args, **kw)', globals(), locals(),
                    filename=filename)

    return locals()['result']
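
runctx only writes the stats file; it does not return the value of the profiled statement, which is why Example #18 binds the result to a name inside the executed string and fetches it back out of the locals dict. A hedged usage sketch (query_database and its arguments are invented placeholders, not part of the example):

# profiles query_database(session, limit=10) into query.prof and still
# returns whatever the function produced
rows = _profile('query.prof', query_database, session, limit=10)
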
Example #19
def run_with_profile(runsnake=False, dump=False):
    import cProfile
    import pstats
    filename = "orm2010.profile"

    if os.path.exists("orm2010.profile"):
        os.remove("orm2010.profile")

    def status(msg):
        print(msg)

    cProfile.runctx('runit(status)', globals(), locals(), filename)
    stats = pstats.Stats(filename)

    counts_by_methname = dict((key[2], stats.stats[key][0]) for key in stats.stats)

    print("SQLA Version: %s" % __version__)
    print("Total calls %d" % stats.total_calls)
    print("Total cpu seconds: %.2f" % stats.total_tt)
    print('Total execute calls: %d' \
        % counts_by_methname["<method 'execute' of 'sqlite3.Cursor' "
                             "objects>"])
    print('Total executemany calls: %d' \
        % counts_by_methname.get("<method 'executemany' of 'sqlite3.Cursor' "
                             "objects>", 0))

    if dump:
        stats.sort_stats('time', 'calls')
        stats.print_stats()

    if runsnake:
        os.system("runsnake %s" % filename)
Example #20
    def _main(self):
        from DegenPrimer.WorkCounter import WorkCounter
        from DegenPrimer.Equilibrium import Equilibrium, EquilibriumSolver, EquilibriumBase
        import cProfile
        import shelve
        
        db = shelve.open('../data/reactions.shelve', 'r', protocol=-1)
        reactions = db['reactions']
        concentrations = db['concentrations']
        
        print len(reactions)
        print len(concentrations)
        
#        cProfile.runctx('''for i in xrange(1):
#        eq = EquilibriumSolver(self.abort_event, reactions, concentrations, 1e-10)
#        eq.calculate()
#        print eq.objective_value''', 
#        globals(), locals(),
#        'EquilibriumSolver.profile')
        
        cProfile.runctx('''for i in xrange(1):
        eq = Equilibrium(self.abort_event, reactions, concentrations, 1e-10)
        eq.calculate(WorkCounter())''', 
        globals(), locals(),
        'Equilibrium.profile')
    
        print 'Done'
Example #21
def main():
	# abort silently on signal
	signal.signal(signal.SIGINT, functools.partial(exithandler, 130))
	signal.signal(signal.SIGTERM, functools.partial(exithandler, 1))

	# use locale-specific time.strftime handling.
	try:
		locale.setlocale(locale.LC_TIME, '')
	except locale.Error: # Workaround for "locale.Error: unsupported locale setting"
		pass

	#chdir to Unknown Horizons root
	os.chdir( find_uh_position() )
	logging.config.fileConfig( os.path.join('content', 'logging.conf'))

	create_user_dirs()

	options = get_option_parser().parse_args()[0]
	setup_debugging(options)

	# NOTE: this might cause a program restart
	init_environment()

	# test if required libs can be found or display specific error message
	try:
		import yaml
	except ImportError:
		headline = _('Error: Unable to find required library "PyYAML".')
		msg = _("We are sorry to inform you that a library that is required by Unknown Horizons, is missing and needs to be installed.") + "\n" + \
		    _('Installers for Windows users are available at "http://pyyaml.org/wiki/PyYAML", Linux users should find it in their packagement management system under the name "pyyaml" or "python-yaml".')
		standalone_error_popup(headline, msg)
		exit(1)

	#start UH
	import horizons.main
	ret = True
	if not options.profile:
		# start normal
		ret = horizons.main.start(options)
	else:
		# start with profiling
		try:
			import cProfile as profile
		except ImportError:
			import profile

		from horizons.constants import PATHS
		profiling_dir = os.path.join(PATHS.USER_DIR, 'profiling')
		if not os.path.exists(profiling_dir):
			os.makedirs(profiling_dir)

		outfilename = os.path.join(profiling_dir, time.strftime('%Y-%m-%d_%H-%M-%S') + '.prof')
		print('Starting in profile mode. Writing output to: %s' % outfilename)
		profile.runctx('horizons.main.start(options)', globals(), locals(), outfilename)
		print('Program ended. Profiling output: %s' % outfilename)

	if logfile:
		logfile.close()
	if ret:
		print(_('Thank you for using Unknown Horizons!'))
Example #22
    def profile(self,methodstatement, locals={},globals={}):
        """
        create a wrapper method which has no args and then pass that wrapper method to this method as first arg
        method is passed as a string e.g. 'listDirTest()'
        it remove stats is False the path where the stats are will be returned
        make sure that whatever arguments used in the statement are passed to the globals

        example:

        import JumpScale.tools.performancetrace
        do=j.tools.performancetrace.profile('test0b()', globals=globals())

        """
        import cProfile
        import pstats
        path=j.sal.fs.joinPaths(j.dirs.tmpDir,"perftest","%s.log"%j.data.idgenerator.generateRandomInt(1,10000))        
        j.sal.fs.createDir(j.sal.fs.joinPaths(j.dirs.tmpDir,"perftest"))        
        globs = {
            '__file__': "afile",
            '__name__': '__main__',
            '__package__': None,
        }
        globals.update(globs)
        cProfile.runctx(methodstatement, globals, locals, path)
        p1 = pstats.Stats(path)

        # p1.strip_dirs().sort_stats('cum').print_stats(100)
        p1.strip_dirs().sort_stats('time').print_stats(100)
        j.sal.fs.removeDirTree(path)
        return p1
Example #23
def Run(fileName = None, period = 0.2, real = False, 
        verbose = 0, profiling = False):
    """Run once """

    import ioflo.base.skedding as skedding

    if not fileName:
        fileName = "../../app/plan/meta.flo"


    print "Building ..."
    skedder = tasking.Skedder(name = "TestTasker",
                              period = period,
                              real = real, 
                              filePath = fileName)
    if skedder.build():
        print "\n\nStarting mission from file %s...\n" % (fileName)
        if not profiling:
            skedder.run()
        else:
            import cProfile
            import pstats         

            cProfile.runctx('skedder.run()',globals(),locals(), './profiles/skedder')
            p = pstats.Stats('./profiles/skedder')
            p.sort_stats('time').print_stats()
            p.print_callers()
            p.print_callees()


    return skedder
Example #24
def main():
    parser = argparse.ArgumentParser(description="""
    Slocum -- A tool for ocean passage planning.

    Joshua Slocum (February 20, 1844 -on or shortly after November 14, 1909)
    was a Canadian-American seaman and adventurer, a noted writer, and the
    first man to sail single-handedly around the world. In 1900 he told the
    story of this in Sailing Alone Around the World. He disappeared in
    November 1909 while aboard his boat, the Spray. (wikipedia)""")

    # add subparser for each task
    subparsers = parser.add_subparsers()
    for k in _task_handler.keys():
        func, p_setup = _task_handler[k]
        p = subparsers.add_parser(k, help=func.__doc__)
        p.set_defaults(func=func)
        p_setup(p)

    if (len(sys.argv) < 2):
      handle_gui()
    else:
      # parse the arguments and run the handler associated with each task
      args = parser.parse_args()
      args.input = args.input_file or args.input
      args.output = args.output_file or args.output
      if args.profile:
          import cProfile
          cProfile.runctx('args.func(args)', globals(), locals(), args.profile)
      else:
          args.func(args)
Example #25
 def run(self, profile=False):
     self.knownModificationEvents = ["modified_base", "m6A", "m4C", "m5C"]
     if profile:
         cProfile.runctx("self._mainLoop()", globals=globals(), locals=locals(), filename="profile.out")
         return 0
     else:
         return self._mainLoop()
Example #26
 def start(self):
     """
     Entry point for a command.  Responsible for setting up a context and
     calling the relevant pre/run/post methods in the right order.  Don't
     override unless you know what you're doing.
     """
     self._load_early_options()
     self._pre_enter()
     with self:
         self._pre_load_options()
         self._load_options()
         self._pre_run()
         if self.cprofile:
             try:
                 fn = self.cprofile_filename
             except AttributeError:
                 fn = None
             import cProfile
             cProfile.runctx('self.run()', globals(), locals(), filename=fn)
             if fn:
                 self._out("wrote profile stats to %s" % fn)
         else:
             self.run()
         self._post_run()
         self._run_next()
     self._end()
Example #27
def main(options):
    check_and_adjust(options)

    matching.__okErrF = sw.fedList(options.okErrF)
    matching.__utcaBcnDelta = options.utcaBcnDelta
    matching.__utcaPipelineDelta = options.utcaPipelineDelta
    if options.noColor:
        printer.__color = False

    if options.noLoop:
        goCode = 0
    else:
        analyze.setup()
        if options.profile:
            import cProfile
            cProfile.runctx("go(options)", globals(), locals(), sort="time")
            goCode = 0  # FIXME
        else:
            goCode = go(options)

    if options.feds2 and 0 <= options.dump:
        analyze.printChannelSummary(options.outputFile)

    if not options.noPlot:
        graphs.main(options)

    return goCode
Example #28
def profilerCommandLineRun(cntlr, options, sourceZipStream=None, **kwargs):
    from arelle import Locale
    import cProfile, pstats, sys, time
    profileReportFile = getattr(options, "profilerReportFile", None)
    if profileReportFile and not getattr(cntlr, "blockNestedProfiling", False):
        startedAt = time.time()
        cntlr.addToLog(_("invoking command processing under profiler"))
        statsFile = profileReportFile + ".bin"
        cntlr.blockNestedProfiling = True
        cProfile.runctx("cntlr.run(options, sourceZipStream)", globals(), locals(), statsFile)
        cntlr.addToLog(Locale.format_string(cntlr.modelManager.locale, 
                                            _("profiled command processing completed in %.2f secs"), 
                                            time.time() - startedAt))
        # specify a file for log
        priorStdOut = sys.stdout
        sys.stdout = open(profileReportFile, "w")
    
        statObj = pstats.Stats(statsFile)
        statObj.strip_dirs()
        statObj.sort_stats("time")
        statObj.print_stats()
        statObj.print_callees()
        statObj.print_callers()
        sys.stdout.flush()
        sys.stdout.close()
        del statObj
        sys.stdout = priorStdOut
        os.remove(statsFile)
        del cntlr.blockNestedProfiling
        sys.exit() # raise SYSTEM_EXIT to stop outer execution
Example #29
 def call(*args, **kwargs):
     ldict = locals().copy()
     ldict.update({"fn": fn, "args": args, "kwargs": kwargs})
     if appendDate:
         out = "%s-%s" % (out, datetime.datetime.now().isoformat())
     profile.runctx("rtn = fn(*args, **kwargs)", globals(), ldict, out)
     return ldict["rtn"]
Example #30
def main():
    gsize = (2, 2, 2)
    x1 = -10.0; x2 = -5.0  # pylint: disable=C0321
    y1 = -5.0; y2 = 5.0  # pylint: disable=C0321
    z1 = -5.0; z2 = 5.0  # pylint: disable=C0321
    vol = seed.Volume((z1, y1, x1), (z2, y2, x2), gsize)

    f3d = readers.load_file("/Users/kmaynard/dev/work/t1/t1.3df.004320.xdmf")
    fld_bx = f3d["bx"]
    fld_by = f3d["by"]
    fld_bz = f3d["bz"]

    B = field.scalar_fields_to_vector([fld_bx, fld_by, fld_bz], name="B_cc",
                                      _force_layout=field.LAYOUT_INTERLACED)
    topo_arr = np.empty(gsize, order='C', dtype='int')
    lines, topo = None, None
    t0 = time()
    cProfile.runctx("nsegs = py_get_topo(B, topo_arr, x1, x2, y1, y2, z1, z2)",
                    globals(), locals(), "topo.prof")
    t1 = time()
    s = pstats.Stats("topo.prof")
    s.strip_dirs().sort_stats("tottime").print_stats()
    nsegs = py_get_topo(B, topo_arr, x1, x2, y1, y2, z1, z2)
    t = t1 - t0

    print(topo_arr)

    # print("numba time: {0}s, {1}s/seg".format(t, t / nsegs))
    print("numba time: {0}s".format(t))
Example #31
#!/usr/bin/env python
# encoding: utf-8
"""
Add `#cython: profile=True` on top or the rnacounter script, with other compiler directives.

Then run

python profiling.py -o zzz -n 1 -c chr18 /archive/epfl/bbcf/jdelafon/test_rnaseq/mefKO.bam /archive/epfl/bbcf/jdelafon/test_rnaseq/mm9_renamed.gtf

python profiling.py -o zzz -n 1 testfiles/gapdhKO.bam testfiles/mm9_3genes_renamed.gtf ;

(need "-o sth" because closing stdout produces a "ValueError: I/O operation or closed file".)
"""

import pstats, cProfile
import pyximport
pyximport.install()
import rnacounter
from rnacounter import rnacounter_main, parse_args
from docopt import docopt

args = docopt(rnacounter.__doc__, version='0.1')
bamname, annotname, options = parse_args(args)

cProfile.runctx(
    "rnacounter_main('%s','%s',%s)" % (bamname, annotname, options), globals(),
    locals(), "stats_profiling.prof")
s = pstats.Stats("stats_profiling.prof")
s.strip_dirs().sort_stats("time").print_stats()
Example #32
def runProfiled(cmd, level=1.0):
    "run a command profiled and output results"
    cProfile.runctx(cmd, globals(), locals(), filename="slicer.prof")
    p = pstats.Stats('slicer.prof')
    p.sort_stats('cum')
    p.print_stats(level)
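
Because Example #32 receives the statement as a string and runs it against the globals()/locals() of the module defining runProfiled, whatever the command calls has to be resolvable there. A hedged usage sketch (slice_model is an invented placeholder):

runProfiled('slice_model()', level=0.25)      # prints the top 25% of entries, sorted by cumulative time
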
Example #33
 def start_profiling(self, snapshot, index):
     cProfile.runctx('self.get_calc_save(snapshot, index)',
                     globals=globals(), locals=locals(), filename='profile.prof')
Example #34
from eleve import Segmenter, MemoryStorage, LeveldbStorage


def benchmark(storage_class, create=True):
    m = storage_class(4)
    s = Segmenter(m, 3)
    if create:
        m.clear()

    corpus = reuters.raw()

    tokens = list(filter(lambda t: t.category == "",
                         tokeniser_fr(corpus)))[:10000]

    if create:
        m.add_sentence(tokens)

    for i in range(1, 5000, 30):
        print(s.segment(tokens[i:i + 30]))


if __name__ == "__main__":
    import pstats, cProfile
    import pyximport

    pyximport.install()
    cProfile.runctx("benchmark(MemoryStorage)", globals(), locals(),
                    "profile.prof")
    s = pstats.Stats("profile.prof")
    s.strip_dirs().sort_stats("time").print_stats()
Example #35
    pts = getEllipsePts(e)
    pts.shape = -1, 1, 2
    pts = pts.astype('float32')

    _, rot, trans = cam_model.solvePnP(target_pts3D, pts.astype('float32'))

    return trans, rot

if __name__ == '__main__':
    def bench():
        import cv2
        cap = cv2.VideoCapture(0)
        cap.set(3,1280)
        cap.set(4,720)
        for x in range(100):
            sts,img = cap.read()
            # img = cv2.imread('/Users/mkassner/Desktop/manual_calibration_marker-01.png')
            gray  = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            print(len(find_concetric_circles(gray,visual_debug=img)))
            cv2.imshow('img',img)
            cv2.waitKey(1)
            # return


    import cProfile,subprocess,os
    cProfile.runctx("bench()",{},locals(),"world.pstats")
    loc = os.path.abspath(__file__).rsplit('pupil_src', 1)
    gprof2dot_loc = os.path.join(loc[0], 'pupil_src', 'shared_modules','gprof2dot.py')
    subprocess.call("python "+gprof2dot_loc+" -f pstats world.pstats | dot -Tpng -o world_cpu_time.png", shell=True)
    print("created  time graph for  process. Please check out the png next to this file")
Example #36
        try:
            args = linter.load_command_line_configuration(args)
        except SystemExit, exc:
            if exc.code == 2:  # bad options
                exc.code = 32
            raise
        if not args:
            print linter.help()
            sys.exit(32)
        # insert current working directory to the python path to have a correct
        # behaviour
        linter.prepare_import_path(args)
        if self.linter.config.profile:
            print >> sys.stderr, '** profiled run'
            import cProfile, pstats
            cProfile.runctx('linter.check(%r)' % args, globals(), locals(),
                            'stones.prof')
            data = pstats.Stats('stones.prof')
            data.strip_dirs()
            data.sort_stats('time', 'calls')
            data.print_stats(30)
        else:
            linter.check(args)
        linter.cleanup_import_path()
        if exit:
            sys.exit(self.linter.msg_status)

    def cb_set_rcfile(self, name, value):
        """callback for option preprocessing (i.e. before optik parsing)"""
        self._rcfile = value

    def cb_add_plugins(self, name, value):
Example #37
def create_command_line_solver(problem, parser=None):
    """Convert a given problem implementation to a
    command-line example by exposing the
    :func:`pybnb.solver.solve` function arguments using
    argparse."""
    import os
    import tempfile
    # for profiling
    try:
        import cProfile as profile
    except ImportError:                                #pragma:nocover
        import profile
    try:
        import pstats
        pstats_available=True
    except ImportError:                                #pragma:nocover
        pstats_available=False
    import pybnb
    try:
        import yaml
    except ImportError:                                #pragma:nocover
        raise ImportError("The PyYAML module is required to "
                          "run the command-line solver.")
    from pybnb.convergence_checker import \
        _auto_queue_tolerance
    if parser is None:
        import argparse
        parser = argparse.ArgumentParser(
            description="Run parallel branch and bound",
            formatter_class=argparse.\
                ArgumentDefaultsHelpFormatter)

    solver_init_defaults = get_default_args(
        pybnb.Solver.__init__)
    solver_init_docs = get_keyword_docs(
        pybnb.Solver.__doc__)
    assert set(solver_init_defaults.keys()) == \
        set(solver_init_docs.keys())
    solver_init_defaults.pop("comm")
    solver_init_docs.pop("comm")
    assert len(solver_init_defaults) == len(solver_init_docs)
    for key in solver_init_defaults:
        assert solver_init_defaults[key] == \
            solver_init_docs[key]["default"]
        assert "choices" not in solver_init_docs[key]
    parser.add_argument(
        "--dispatcher-rank",
        type=int,
        default=solver_init_defaults.pop("dispatcher_rank"),
        help=solver_init_docs["dispatcher_rank"]["doc"])
    assert len(solver_init_defaults) == 0, str(solver_init_defaults)

    solve_defaults = get_default_args(
        pybnb.Solver.solve)
    solve_docs = get_keyword_docs(
        pybnb.Solver.solve.__doc__)
    solve_docs.pop("problem")
    assert set(solve_defaults.keys()) == \
        set(solve_docs.keys())
    solve_defaults.pop("best_node")
    solve_docs.pop("best_node")
    solve_defaults.pop("initialize_queue")
    solve_docs.pop("initialize_queue")
    solve_defaults.pop("scale_function")
    solve_docs.pop("scale_function")
    solve_defaults.pop("log")
    solve_docs.pop("log")
    assert len(solve_defaults) == len(solve_docs)
    for key in solve_defaults:
        if key == "queue_tolerance":
            assert "default" not in solve_docs[key]
            assert solve_defaults[key] is \
                _auto_queue_tolerance
        else:
            assert solve_defaults[key] == \
                solve_docs[key]["default"]
        assert "choices" not in solve_docs[key]
        if key == "queue_strategy":
            solve_docs[key]["choices"] = \
                [v_.value for v_ in pybnb.QueueStrategy]
            assert "**(D)**" in solve_docs[key]["doc"]
            solve_docs[key]["doc"] = \
                ("**(D)** Sets the strategy for prioritizing "
                 "nodes in the central dispatcher queue. Can "
                 "also be set to a comma-separated list of "
                 "choices to define a lexicographic sorting "
                 "strategy.")
    class _QueueStrategyJoin(argparse.Action):    #pragma:nocover
        def __init__(self, option_strings, dest, nargs=None, **kwargs):
            if nargs is not None:
                raise ValueError("nargs not allowed")
            super(_QueueStrategyJoin, self).__init__(option_strings,
                                                     dest,
                                                     **kwargs)
        def __call__(self, parser, namespace, values, option_string=None):
            if values in solve_docs["queue_strategy"]["choices"]:
                namespace.queue_strategy = values
            else:
                assert "," in values
                vals = tuple(v.strip() for v in values.split(',') if v.strip())
                assert len(vals) > 0
                assert all(v in solve_docs["queue_strategy"]["choices"]
                           for v in vals)
                namespace.queue_strategy = vals
    class _QueueStrategyChoices(object):          #pragma:nocover
        def __contains__(self, val):
            if val in solve_docs["queue_strategy"]["choices"]:
                return True
            if "," not in val:
                return False
            vals = [v.strip() for v in val.split(',') if v.strip()]
            if len(vals) == 0:
                return False
            return all(v in solve_docs["queue_strategy"]["choices"]
                       for v in vals)
        def __iter__(self):
            return solve_docs["queue_strategy"]["choices"].__iter__()
    parser.add_argument(
        "--best-objective",
        type=float,
        default=solve_defaults.pop("best_objective"),
        help=solve_docs["best_objective"]["doc"])
    tmp_ = solve_defaults.pop("disable_objective_call")
    assert not tmp_
    del tmp_
    parser.add_argument(
        "--disable-objective-call",
        default=False,
        action="store_true",
        help=solve_docs["disable_objective_call"]["doc"])
    parser.add_argument(
        "--queue-strategy",
        type=str,
        choices=_QueueStrategyChoices(),
        action=_QueueStrategyJoin,
        default=solve_defaults.pop("queue_strategy"),
        help=solve_docs["queue_strategy"]["doc"])
    parser.add_argument(
        "--absolute-gap",
        type=float,
        default=solve_defaults.pop("absolute_gap"),
        help=solve_docs["absolute_gap"]["doc"])
    parser.add_argument(
        "--relative-gap",
        type=float,
        default=solve_defaults.pop("relative_gap"),
        help=solve_docs["relative_gap"]["doc"])
    parser.add_argument(
        "--objective-stop",
        type=float,
        default=solve_defaults.pop("objective_stop"),
        help=solve_docs["objective_stop"]["doc"])
    parser.add_argument(
        "--bound-stop",
        type=float,
        default=solve_defaults.pop("bound_stop"),
        help=solve_docs["bound_stop"]["doc"])
    parser.add_argument(
        "--node-limit",
        type=int,
        default=solve_defaults.pop("node_limit"),
        help=solve_docs["node_limit"]["doc"])
    parser.add_argument(
        "--time-limit",
        type=float,
        default=solve_defaults.pop("time_limit"),
        help=solve_docs["time_limit"]["doc"])
    parser.add_argument(
        "--queue-limit",
        type=int,
        default=solve_defaults.pop("queue_limit"),
        help=solve_docs["queue_limit"]["doc"])
    val = solve_defaults.pop("track_bound")
    assert val
    parser.add_argument(
        "--disable-track-bound",
        action="store_false",
        dest="track_bound",
        default=True,
        help=solve_docs["track_bound"]["doc"])
    def _float_or_None(val):                      #pragma:nocover
        if val == "None":
            return None
        return float(val)
    parser.add_argument(
        "--queue-tolerance",
        type=_float_or_None,
        default=solve_defaults.pop("queue_tolerance"),
        help=solve_docs["queue_tolerance"]["doc"])
    parser.add_argument(
        "--branch-tolerance",
        type=_float_or_None,
        default=solve_defaults.pop("branch_tolerance"),
        help=solve_docs["branch_tolerance"]["doc"])
    parser.add_argument(
        "--comparison-tolerance",
        type=float,
        default=solve_defaults.pop("comparison_tolerance"),
        help=solve_docs["comparison_tolerance"]["doc"])
    parser.add_argument(
        "--log-interval-seconds",
        type=float,
        default=solve_defaults.pop("log_interval_seconds"),
        help=solve_docs["log_interval_seconds"]["doc"])
    val = solve_defaults.pop("log_new_incumbent")
    assert val
    parser.add_argument(
        "--disable-log-new-incumbent",
        action="store_false",
        dest="log_new_incumbent",
        default=True,
        help=solve_docs["log_new_incumbent"]["doc"])
    val = solve_defaults.pop("disable_signal_handlers")
    assert not val
    parser.add_argument(
        "--disable-signal-handlers",
        action="store_true",
        default=False,
        help=solve_docs["disable_signal_handlers"]["doc"])
    assert len(solve_defaults) == 0, str(solve_defaults)

    parser.add_argument(
        "--log-filename", type=str, default=None,
        help=("A filename to store solver output into."))
    parser.add_argument(
        "--results-filename", type=str, default=None,
        help=("When set, saves the solver results into a "
              "YAML-formatted file with the given name."))
    parser.add_argument(
        "--disable-mpi", default=False,
        action="store_true",
        help=("Do not attempt to import mpi4py.MPI. "
              "Enabling this option is equivalent to "
              "creating a Solver with `comm=None`."))
    if pstats_available:
        parser.add_argument(
            "--profile", dest="profile", type=int, default=0,
            help=("Enable profiling by setting this "
                  "option to a positive integer (the "
                  "maximum number of functions to "
                  "profile)."))
    parser.add_argument('--version',
                        action='version',
                        version='pybnb '+str(pybnb.__version__))
    nested_solver_defaults = get_default_args(
        pybnb.futures.NestedSolver.__init__)
    nested_solver_docs = get_keyword_docs(
        pybnb.futures.NestedSolver.__doc__)
    nested_solver_docs.pop("problem")
    assert len(nested_solver_defaults) == len(nested_solver_docs)
    parser.add_argument(
        "--nested-solver",
        action="store_true",
        default=False,
        help=("**(W)** Wraps the problem in a "
              ":class:`pybnb.futures.NestedSolver` object. "
              "See additional --nested-solver-* options."))
    parser.add_argument(
        "--nested-node-limit",
        type=int,
        default=nested_solver_defaults.pop("node_limit"),
        help=nested_solver_docs["node_limit"]["doc"])
    parser.add_argument(
        "--nested-time-limit",
        type=float,
        default=nested_solver_defaults.pop("time_limit"),
        help=nested_solver_docs["time_limit"]["doc"])
    parser.add_argument(
        "--nested-queue-limit",
        type=int,
        default=nested_solver_defaults.pop("queue_limit"),
        help=nested_solver_docs["queue_limit"]["doc"])
    val = nested_solver_defaults.pop("track_bound")
    assert val
    parser.add_argument(
        "--nested-disable-track-bound",
        action="store_false",
        dest="nested_track_bound",
        default=True,
        help=nested_solver_docs["track_bound"]["doc"])
    parser.add_argument(
        "--nested-queue-strategy",
        type=str,
        choices=_QueueStrategyChoices(),
        action=_QueueStrategyJoin,
        default=nested_solver_defaults.pop("queue_strategy"),
        help=nested_solver_docs["queue_strategy"]["doc"])
    assert len(nested_solver_defaults) == 0,\
        str(nested_solver_defaults)

    args = parser.parse_args()

    try:
        import mpi4py
    except ImportError:                                #pragma:nocover
        if not args.disable_mpi:
            raise ImportError("The mpi4py module is not "
                              "available. To run this script "
                              "without it, use the '--disable-mpi' "
                              "option")

    if pstats_available and (args.profile):            #pragma:nocover
        #
        # Call the main routine with profiling.
        #
        handle, tfile = tempfile.mkstemp()
        os.close(handle)
        try:
            profile.runctx("_run_command_line_solver(problem, args)",
                           globals(),
                           locals(),
                           tfile)
            p = pstats.Stats(tfile).strip_dirs()
            p.sort_stats("time", "cumulative")
            p = p.print_stats(args.profile)
            p.print_callers(args.profile)
            p.print_callees(args.profile)
            p = p.sort_stats("cumulative","calls")
            p.print_stats(args.profile)
            p.print_callers(args.profile)
            p.print_callees(args.profile)
            p = p.sort_stats("calls")
            p.print_stats(args.profile)
            p.print_callers(args.profile)
            p.print_callees(args.profile)
        finally:
            os.remove(tfile)
    else:
        _run_command_line_solver(problem, args)
Example #38
                    pause = not pause
                if keyinput[pygame.K_r]:
                    viewer_distance = 256
                    fov = 2
            if pause is not True:
                # clear screen
                surface.fill((0, 0, 0, 255))
                for thing in objects:
                    thing.update()
                pygame.display.flip()
            frames -= 1
        duration = time.time() - anim_starttime
        print "Done 100 Frames in %f seonds, average %f fps" % (duration,
                                                                100 / duration)
        print "Whole program duration %f seconds" % (time.time() -
                                                     total_starttime)
    except KeyboardInterrupt:
        print 'shutting down'


if __name__ == "__main__":
    test()
    sys.exit(0)
    import cProfile
    import pstats
    profile = "profiles/%s.profile" % sys.argv[0].split(".")[0]
    cProfile.runctx("test()", globals(), locals(), filename=profile)
    s = pstats.Stats(profile)
    s.sort_stats('time')
    s.print_stats()
Example #39
 def _onFilterRegExpChanged(self, text):
     cProfile.runctx('self._onFilterRegExpChanged(text)', globals(),
                     locals())
Example #40
                    
            # Store frequency
            fmax[i, 0:2] = [(i*time_le+fourier_le/2)*dt, a-2.5*deltaf]
        # Lower frequency otherwise
        else:        
            while abs(deltaf)>dfmin:
                F = abs(local_trapz(m.foo(a-deltaf,V_temp,expon)))
                if F > essaimax:
                    essaimax = F
                    a -= deltaf
                else:
                    deltaf = -deltaf/5
        
            # Store frequency
            fmax[i, 0:2] = [(i*time_le+fourier_le/2)*dt, a+2.5*deltaf]
        
    # Save calculation in file
    fmax[:,2] = smooth(fmax[:,1])[5:-5]
    savetxt(outputfile, fmax)

i=0
while i<len(sys.argv):
    # read input and output files from arg
    if sys.argv[i]=='-i':
        inputfile=sys.argv[i+1]
    elif sys.argv[i]=='-o':
        outputfile=sys.argv[i+1]
    i += 1

cProfile.runctx('tdo_fft(inputfile,outputfile)', globals(), locals(), 'oupsss.txt')
#tdo_fft(inputfile, outputfile)
Example #41
def _run_command_impl(command, parser, args, name, data, options):
    #
    # Call the main Pyomo runner with profiling
    #
    retval = None
    errorcode = 0
    pcount = options.runtime.profile_count
    if pcount > 0:
        # Defer import of profiling packages until we know that they
        # are needed
        try:
            try:
                import cProfile as profile
            except ImportError:
                import profile
            import pstats
        except ImportError:
            raise ValueError(
                "Cannot use the 'profile' option: the Python "
                "'profile' or 'pstats' package cannot be imported!")
        tfile = TempfileManager.create_tempfile(suffix=".profile")
        tmp = profile.runctx(
            command.__name__ + '(options=options,parser=parser)',
            command.__globals__, locals(), tfile)
        p = pstats.Stats(tfile).strip_dirs()
        p.sort_stats('time', 'cumulative')
        p = p.print_stats(pcount)
        p.print_callers(pcount)
        p.print_callees(pcount)
        p = p.sort_stats('cumulative', 'calls')
        p.print_stats(pcount)
        p.print_callers(pcount)
        p.print_callees(pcount)
        p = p.sort_stats('calls')
        p.print_stats(pcount)
        p.print_callers(pcount)
        p.print_callees(pcount)
        retval = tmp
    else:
        #
        # Call the main Pyomo runner without profiling
        #
        try:
            retval = command(options=options, parser=parser)
        except SystemExit:
            err = sys.exc_info()[1]
            #
            # If debugging is enabled or the 'catch' option is specified, then
            # exit.  Otherwise, print an "Exiting..." message.
            #
            if __debug__ and (options.runtime.logging == 'debug'
                              or options.runtime.catch_errors):
                sys.exit(0)
            print('Exiting %s: %s' % (name, str(err)))
            errorcode = err.code
        except Exception:
            err = sys.exc_info()[1]
            #
            # If debugging is enabled or the 'catch' option is specified, then
            # pass the exception up the chain (to pyomo_excepthook)
            #
            if __debug__ and (options.runtime.logging == 'debug'
                              or options.runtime.catch_errors):
                raise

            if not options.model is None and not options.model.save_file is None:
                model = "model " + options.model.save_file
            else:
                model = "model"

            global filter_excepthook
            if filter_excepthook:
                action = "loading"
            else:
                action = "running"

            msg = "Unexpected exception while %s %s:\n    " % (action, model)
            #
            # This handles the case where the error is propagated by a KeyError.
            # KeyError likes to pass raw strings that don't handle newlines
            # (they translate "\n" to "\\n"), as well as tacking on single
            # quotes at either end of the error message. This undoes all that.
            #
            errStr = str(err)
            if type(err) == KeyError and errStr != "None":
                errStr = str(err).replace(r"\n", "\n")[1:-1]

            logger.error(msg + errStr)
            errorcode = 1

    return retval, errorcode
Example #42
def main():
    options = docopt(__doc__)

    kwargs = {'func': requests.get,
              'args': tuple((urljoin(options['<url>'], domain), ) for domain in generate_domains())}
    cProfile.runctx('execute(func, args)', globals(), kwargs, sort='cumulative')
Example #43
def profile(data):
    cProfile.runctx("simple_reduce(data)", globals(), locals(), "Profile.prof")

    s = pstats.Stats("Profile.prof")
    s.strip_dirs().sort_stats("time").print_stats()
Example #44
    session_settings['window_size'] = glfwGetWindowSize(main_window)
    session_settings['window_position'] = glfwGetWindowPos(main_window)

    session_settings.close()
    # de-init all running plugins
    for p in g_pool.plugins:
        p.alive = False
    g_pool.plugins.clean()

    cap.close()
    glfwDestroyWindow(main_window)
    glfwTerminate()
    logger.debug("Process done")


if __name__ == '__main__':
    freeze_support()
    if 1:
        main()
    else:
        import cProfile, subprocess, os
        cProfile.runctx("main()", {}, locals(), "player.pstats")
        loc = os.path.abspath(__file__).rsplit('pupil_src', 1)
        gprof2dot_loc = os.path.join(loc[0], 'pupil_src', 'shared_modules',
                                     'gprof2dot.py')
        subprocess.call(
            "python " + gprof2dot_loc +
            " -f pstats player.pstats | dot -Tpng -o player_cpu_time.png",
            shell=True)
        print "created cpu time graph for pupil player . Please check out the png next to the main.py file"
Example #45
import numpy as np
import scipy
import matplotlib.pyplot as plt
import cProfile

print 'This is script princomp_ma7.py'
import __main__ as M
self = M.mag
self.status.set('running script')

logfile = r'L:\Lab & Testing\Hideya\afar_switch_3\IL\afar_sw_IL_GSO_06may15_1230_5228_P1_L2P1_D7_S1_B29_SN1.log'
self.wclearfiles()

command_string = 'self.add_logfile( logfile )'
cProfile.runctx(command_string, globals(), locals())
self.win_load()

self.status.set('script finished')
Example #46
hikari_enum_getitem_time = timeit.timeit("BasicHikariEnum['z']",
                                         number=1_000_000,
                                         globals=globals())

print("BasicPyEnum.__call__('25')", py_enum_call_time, "µs")
print("BasicHikariEnum.__call__('25')", hikari_enum_call_time, "µs")
print("BasicPyEnum._value2member_map_['25']", py_enum_delegate_to_map_time,
      "µs")
print("BasicHikariEnum._value_to_member_map['25']",
      hikari_enum_delegate_to_map_time, "µs")
print("BasicPyEnum.__getitem__['z']", py_enum_getitem_time, "µs")
print("BasicHikariEnum.__getitem__['z']", hikari_enum_getitem_time, "µs")

print("BasicPyEnum.__call__ profile")
cProfile.runctx("for i in range(1_000_000): BasicPyEnum('25')",
                globals=globals(),
                locals=locals())

print("BasicHikariEnum.__call__ profile")
cProfile.runctx("for i in range(1_000_000): BasicHikariEnum('25')",
                globals=globals(),
                locals=locals())

print("BasicPyEnum.__getitem__ profile")
cProfile.runctx("for i in range(1_000_000): BasicPyEnum['z']",
                globals=globals(),
                locals=locals())

print("BasicHikariEnum.__getitem__ profile")
cProfile.runctx("for i in range(1_000_000): BasicHikariEnum['z']",
                globals=globals(),
                locals=locals())
Example #47
 def profiling_init_process(self):
     orig_init_process = orig_init_process_
     ofile = '%s%s' % (profiling_prefix, os.getpid())
     print 'Profiling worker %s, output file: %s' % (worker, ofile)
     cProfile.runctx('orig_init_process()', globals(), locals(), ofile)
Example #48
 def _profile(self):
     """Run _main() under the Python profiler."""
     if self.args.profile == "console":
         self._console_profile(self._main)
     else:
         cProfile.runctx("self._main()", locals(), locals(), self.args.profile)
Example #49
    def _generate(self, env):
        mlog.debug('Build started at', datetime.datetime.now().isoformat())
        mlog.debug('Main binary:', sys.executable)
        mlog.debug('Build Options:',
                   coredata.get_cmd_line_options(self.build_dir, self.options))
        mlog.debug('Python system:', platform.system())
        mlog.log(mlog.bold('The Meson build system'))
        mlog.log('Version:', coredata.version)
        mlog.log('Source dir:', mlog.bold(self.source_dir))
        mlog.log('Build dir:', mlog.bold(self.build_dir))
        if env.is_cross_build():
            mlog.log('Build type:', mlog.bold('cross build'))
        else:
            mlog.log('Build type:', mlog.bold('native build'))
        b = build.Build(env)

        intr = interpreter.Interpreter(b)
        if env.is_cross_build():
            logger_fun = mlog.log
        else:
            logger_fun = mlog.debug
        logger_fun(
            'Build machine cpu family:',
            mlog.bold(intr.builtin['build_machine'].cpu_family_method([], {})))
        logger_fun('Build machine cpu:',
                   mlog.bold(intr.builtin['build_machine'].cpu_method([], {})))
        mlog.log(
            'Host machine cpu family:',
            mlog.bold(intr.builtin['host_machine'].cpu_family_method([], {})))
        mlog.log('Host machine cpu:',
                 mlog.bold(intr.builtin['host_machine'].cpu_method([], {})))
        logger_fun(
            'Target machine cpu family:',
            mlog.bold(intr.builtin['target_machine'].cpu_family_method([],
                                                                       {})))
        logger_fun(
            'Target machine cpu:',
            mlog.bold(intr.builtin['target_machine'].cpu_method([], {})))
        try:
            if self.options.profile:
                fname = os.path.join(self.build_dir, 'meson-private',
                                     'profile-interpreter.log')
                profile.runctx('intr.run()',
                               globals(),
                               locals(),
                               filename=fname)
            else:
                intr.run()
        except Exception as e:
            mintro.write_meson_info_file(b, [e])
            raise
        # Print all default option values that don't match the current value
        for def_opt_name, def_opt_value, cur_opt_value in intr.get_non_matching_default_options(
        ):
            mlog.log(
                'Option', mlog.bold(def_opt_name), 'is:',
                mlog.bold('{}'.format(
                    make_lower_case(cur_opt_value.printable_value()))),
                '[default: {}]'.format(make_lower_case(def_opt_value)))
        try:
            dumpfile = os.path.join(env.get_scratch_dir(), 'build.dat')
            # We would like to write coredata as late as possible since we use the existence of
            # this file to check if we generated the build file successfully. Since coredata
            # includes settings, the build files must depend on it and appear newer. However, due
            # to various kernel caches, we cannot guarantee that any time in Python is exactly in
            # sync with the time that gets applied to any files. Thus, we dump this file as late as
            # possible, but before build files, and if any error occurs, delete it.
            cdf = env.dump_coredata()
            if self.options.profile:
                fname = 'profile-{}-backend.log'.format(intr.backend.name)
                fname = os.path.join(self.build_dir, 'meson-private', fname)
                profile.runctx('intr.backend.generate(intr)',
                               globals(),
                               locals(),
                               filename=fname)
            else:
                intr.backend.generate(intr)
            build.save(b, dumpfile)
            if env.first_invocation:
                coredata.write_cmd_line_file(self.build_dir, self.options)
            else:
                coredata.update_cmd_line_file(self.build_dir, self.options)

            # Generate an IDE introspection file with the same syntax as the already existing API
            if self.options.profile:
                fname = os.path.join(self.build_dir, 'meson-private',
                                     'profile-introspector.log')
                profile.runctx(
                    'mintro.generate_introspection_file(b, intr.backend)',
                    globals(),
                    locals(),
                    filename=fname)
            else:
                mintro.generate_introspection_file(b, intr.backend)
            mintro.write_meson_info_file(b, [], True)

            # Post-conf scripts must be run after writing coredata or else introspection fails.
            intr.backend.run_postconf_scripts()
        except Exception as e:
            mintro.write_meson_info_file(b, [e])
            if 'cdf' in locals():
                old_cdf = cdf + '.prev'
                if os.path.exists(old_cdf):
                    os.replace(old_cdf, cdf)
                else:
                    os.unlink(cdf)
            raise
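
The Meson snippet above uses the pure-Python `profile` module, whose `runctx` signature matches `cProfile.runctx`. Where the pure-Python profiler's overhead matters, the C-accelerated module can usually be dropped in unchanged; a minimal, self-contained sketch (the workload function and filename below are stand-ins, not Meson code):

import cProfile

def interpret():
    # Stand-in workload for illustration only.
    return sum(i * i for i in range(200_000))

fname = 'profile-interpreter.log'
# Same call shape as profile.runctx, with far lower profiling overhead.
cProfile.runctx('interpret()', globals(), locals(), filename=fname)
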
Пример #50
0
def main(argv):
    usage = "Usage: %prog [options] map bot1 bot2\n\nYou must specify a map file."
    parser = OptionParser(usage=usage)

    # map to be played
    # number of players is determined by the map file
    parser.add_option("-m",
                      "--map_file",
                      dest="map",
                      help="Name of the map file")

    # maximum number of turns that the game will be played
    parser.add_option("-t",
                      "--turns",
                      dest="turns",
                      default=1000,
                      type="int",
                      help="Number of turns in the game")

    parser.add_option("--serial",
                      dest="serial",
                      action="store_true",
                      help="Run bots in serial, instead of parallel.")

    parser.add_option("--turntime",
                      dest="turntime",
                      default=1000,
                      type="int",
                      help="Amount of time to give each bot, in milliseconds")
    parser.add_option("--loadtime",
                      dest="loadtime",
                      default=3000,
                      type="int",
                      help="Amount of time to give for load, in milliseconds")
    parser.add_option("-r",
                      "--rounds",
                      dest="rounds",
                      default=1,
                      type="int",
                      help="Number of rounds to play")
    parser.add_option("--player_seed",
                      dest="player_seed",
                      default=None,
                      type="int",
                      help="Player seed for the random number generator")
    parser.add_option("--engine_seed",
                      dest="engine_seed",
                      default=None,
                      type="int",
                      help="Engine seed for the random number generator")

    parser.add_option('--strict',
                      dest='strict',
                      action='store_true',
                      default=False,
                      help='Strict mode enforces valid moves for bots')
    parser.add_option('--capture_errors',
                      dest='capture_errors',
                      action='store_true',
                      default=False,
                      help='Capture errors and stderr in game result')
    parser.add_option('--end_wait',
                      dest='end_wait',
                      default=0,
                      type="float",
                      help='Seconds to wait at end for bots to process end')
    parser.add_option('--secure_jail',
                      dest='secure_jail',
                      action='store_true',
                      default=False,
                      help='Use the secure jail for each bot (*nix only)')
    parser.add_option(
        '--fill',
        dest='fill',
        action='store_true',
        default=False,
        help='Fill up extra player starts with last bot specified')
    parser.add_option('-p',
                      '--position',
                      dest='position',
                      default=0,
                      type='int',
                      help='Player position for first bot specified')

    # ants specific game options
    game_group = OptionGroup(
        parser, "Game Options",
        "Options that affect the game mechanics for ants")
    game_group.add_option(
        "--attack",
        dest="attack",
        default="focus",
        help=
        "Attack method to use for engine. (closest, focus, support, damage)")
    game_group.add_option("--kill_points",
                          dest="kill_points",
                          default=2,
                          type="int",
                          help="Points awarded for killing a hill")
    game_group.add_option(
        "--food",
        dest="food",
        default="symmetric",
        help="Food spawning method. (none, random, sections, symmetric)")
    game_group.add_option("--viewradius2",
                          dest="viewradius2",
                          default=77,
                          type="int",
                          help="Vision radius of ants squared")
    game_group.add_option("--spawnradius2",
                          dest="spawnradius2",
                          default=1,
                          type="int",
                          help="Spawn radius of ants squared")
    game_group.add_option("--attackradius2",
                          dest="attackradius2",
                          default=5,
                          type="int",
                          help="Attack radius of ants squared")
    game_group.add_option("--food_rate",
                          dest="food_rate",
                          nargs=2,
                          type="int",
                          default=(5, 11),
                          help="Numerator of food per turn per player rate")
    game_group.add_option("--food_turn",
                          dest="food_turn",
                          nargs=2,
                          type="int",
                          default=(19, 37),
                          help="Denominator of food per turn per player rate")
    game_group.add_option(
        "--food_start",
        dest="food_start",
        nargs=2,
        type="int",
        default=(75, 175),
        help="One over percentage of land area filled with food at start")
    game_group.add_option(
        "--food_visible",
        dest="food_visible",
        nargs=2,
        type="int",
        default=(3, 5),
        help="Amount of food guaranteed to be visible to starting ants")
    game_group.add_option(
        "--carry_food",
        dest="carry_food",
        type="int",
        default=100,
        help=
        "Amount of food that ants can carry. If carry_food==0, food is teleported to hill (original game behaviour)"
    )
    game_group.add_option(
        "--cutoff_turn",
        dest="cutoff_turn",
        type="int",
        default=150,
        help="Number of turns cutoff percentage is maintained to end game early"
    )
    game_group.add_option(
        "--cutoff_percent",
        dest="cutoff_percent",
        type="float",
        default=0.85,
        help="Number of turns cutoff percentage is maintained to end game early"
    )
    game_group.add_option("--scenario",
                          dest="scenario",
                          action='store_true',
                          default=False)
    parser.add_option_group(game_group)

    # the log directory must be specified for any logging to occur, except:
    #    bot errors to stderr
    #    verbose levels 1 & 2 to stdout and stderr
    #    profiling to stderr
    # the log directory will contain
    #    the replay or stream file used by the visualizer, if requested
    #    the bot input/output/error logs, if requested
    log_group = OptionGroup(parser, "Logging Options",
                            "Options that control the logging")
    log_group.add_option("-g",
                         "--game",
                         dest="game_id",
                         default=0,
                         type='int',
                         help="game id to start at when numbering log files")
    log_group.add_option("-l",
                         "--log_dir",
                         dest="log_dir",
                         default=None,
                         help="Directory to dump replay files to.")
    log_group.add_option('-R',
                         '--log_replay',
                         dest='log_replay',
                         action='store_true',
                         default=False)
    log_group.add_option('-S',
                         '--log_stream',
                         dest='log_stream',
                         action='store_true',
                         default=False)
    log_group.add_option("-I",
                         "--log_input",
                         dest="log_input",
                         action="store_true",
                         default=False,
                         help="Log input streams sent to bots")
    log_group.add_option("-O",
                         "--log_output",
                         dest="log_output",
                         action="store_true",
                         default=False,
                         help="Log output streams from bots")
    log_group.add_option("-E",
                         "--log_error",
                         dest="log_error",
                         action="store_true",
                         default=False,
                         help="log error streams from bots")
    log_group.add_option('-e',
                         '--log_stderr',
                         dest='log_stderr',
                         action='store_true',
                         default=False,
                         help='additionally log bot errors to stderr')
    log_group.add_option('-o',
                         '--log_stdout',
                         dest='log_stdout',
                         action='store_true',
                         default=False,
                         help='additionally log replay/stream to stdout')
    # verbose will not print bot input/output/errors
    # only info+debug will print bot error output
    log_group.add_option("-v",
                         "--verbose",
                         dest="verbose",
                         action='store_true',
                         default=False,
                         help="Print out status as game goes.")
    log_group.add_option("--profile",
                         dest="profile",
                         action="store_true",
                         default=False,
                         help="Run under the python profiler")
    parser.add_option("--nolaunch",
                      dest="nolaunch",
                      action='store_true',
                      default=False,
                      help="Prevent visualizer from launching")
    log_group.add_option("--html",
                         dest="html_file",
                         default=None,
                         help="Output file name for an html replay")
    parser.add_option_group(log_group)

    (opts, args) = parser.parse_args(argv)
    if opts.map is None or not os.path.exists(opts.map):
        parser.print_help()
        return -1
    try:
        if opts.profile:
            # put profile file into output dir if we can
            prof_file = "ants.profile"
            if opts.log_dir:
                prof_file = os.path.join(opts.log_dir, prof_file)
            # cProfile needs to be explicitly told about our local and global context
            print("Running profile and outputting to {0}".format(prof_file, ),
                  file=stderr)
            cProfile.runctx("run_rounds(opts,args)", globals(), locals(),
                            prof_file)
        else:
            # only use psyco if we are not profiling
            # (psyco messes with profiling)
            try:
                import psyco
                psyco.full()
            except ImportError:
                pass
            run_rounds(opts, args)
        return 0
    except Exception:
        traceback.print_exc()
        return -1
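
The comment in the snippet above about passing the local and global context explicitly is the key difference between `cProfile.run` and `cProfile.runctx`: `run()` executes its statement in the `__main__` namespace, so names that exist only inside `main()` (such as `opts` and `args`) would not be visible, while `runctx()` takes both namespaces as arguments. A minimal sketch of the distinction (the workload here is illustrative, not the game engine):

import cProfile

def run_rounds(n):
    # Stand-in workload; the real run_rounds takes (opts, args).
    return sum(i % 7 for i in range(n))

def main():
    n = 500_000
    # cProfile.run("run_rounds(n)") would raise NameError for n here,
    # because run() evaluates the statement in the __main__ namespace.
    cProfile.runctx("run_rounds(n)", globals(), locals(), "ants.profile")

main()
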
Пример #51
0
    def run_eval_panel_fast_coll():
        for ii in range(10000):
            eval_panel_fast_coll(zetaP, ZetaPanel, gamma_pan=3.)

    def run_eval_panel_comp():
        for ii in range(10000):
            eval_panel_comp(zetaP, ZetaPanel, gamma_pan=3.)

    def run_eval_panel_exp():
        for ii in range(10000):
            eval_panel_exp(zetaP, ZetaPanel, gamma_pan=3.)

    print(
        '------------------------------------------ profiling eval_panel_cpp')
    cProfile.runctx('run_eval_panel_cpp()', globals(), locals())

    print(
        '----------------------------------------- profiling eval_panel_fast')
    cProfile.runctx('run_eval_panel_fast()', globals(), locals())

    print(
        '------------------------------------ profiling eval_panel_fast_coll')
    cProfile.runctx('run_eval_panel_fast_coll()', globals(), locals())

    print(
        '----------------------------------------- profiling eval_panel_comp')
    cProfile.runctx('run_eval_panel_comp()', globals(), locals())

    print(
        '------------------------------------------ profiling eval_panel_exp')
    cProfile.runctx('run_eval_panel_exp()', globals(), locals())
Пример #52
0
def main():
    global opts
    opts = environment.parse_args(sys.argv[1:])
    if opts.killsettings:
        environment.settings_dir().remove()
    #options.create_default_manager(pida.core.environment.workspace_name())
    from pida.core import log
    log.configure()

    if not opts.debug:
        warnings.filterwarnings("ignore")

    if opts.trace:
        set_trace()

    # open workspace manager if asked for
    from pida.core.options import must_open_workspace_manager
    # we need a new options manager so the default manager does not do the
    # workspace lookup yet

    def do_workspace_manager():

        def kill(sm):
            sm.hide_and_quit()

        file_names = [
            os.path.abspath(i)
            for i in environment.opts.files
        ]

        def command(sw, row=None):
            # command dispatcher for workspace window
            opts.safe_mode = sw.safe_mode.get_active()
            if sw.user_action == "quit":
                sys.exit(0)
            elif sw.user_action == "new" and sw.new_workspace:
                opts.workspace = sw.new_workspace
                sw.hide_and_quit()
                gtk.main_quit()
            elif sw.user_action == "select":
                if row.id:
                    from pida.utils.pdbus import PidaRemote

                    pr = PidaRemote(row.id)
                    if file_names:
                        pr.call('buffer', 'open_files', file_names)

                    pr.call('appcontroller', 'focus_window')

                    sw.user_action = "quit"
                    sys.exit(0)
                else:
                    opts.workspace = row.workspace
                    sw.hide_and_quit()
                    gtk.main_quit()

        from pida.ui.workspace import WorkspaceWindow
        sw = WorkspaceWindow(command=command)
        sw.widget.show()
        #this mainloop will exit when the workspace window is closed
        gtk.main()

    if opts.version:
        print _('PIDA, version %s') % pida.version
        exit(0)

    if (must_open_workspace_manager() and not environment.workspace_set()) or \
        environment.workspace_manager():
        try:
            do_workspace_manager()
        except ImportError:
            warnings.warn_explicit('python DBus bindings not available. '
                        'Not all functions available.', Warning, 'pida', '')

    if opts.profile_path:
        print "---- Running in profile mode ----"
        import cProfile
        try:
            cProfile.runctx('run_pida()', globals(), locals(), opts.profile_path)
            #benchtime, stones = prof.runcall(run_pida)
        finally:
            pass
        #signal.signal(signal.SIGALRM, force_quit)
        #signal.alarm(3)
        print "---- Top 100 statistic ----"
        import pstats
        p = pstats.Stats(opts.profile_path)
        p.strip_dirs().sort_stats('time', 'cum').print_stats(100)

        sys.exit(0)

    else:
        exit_val = run_pida()
        #XXX: hack for killing threads - better solutions
        sys.exit(exit_val)
Пример #53
0
        trainer.each_epoch.append(track_progress(**locals()))
        trainer.batch_size = 1
        batch_size = 1.
        print("Accuracy before training", model.evaluate(dev_X, dev_y))
        print("Train")
        global epoch_train_acc
        for X, y in trainer.iterate(train_X, train_y):
            # Slightly useful trick: Decay the dropout as training proceeds.
            yh, backprop = model.begin_update(X, drop=trainer.dropout)
            # No auto-diff: Just get a callback and pass the data through.
            # Hardly a hardship, and it means we don't have to create/maintain
            # a computational graph. We just use closures.
            backprop(yh - y, optimizer)

            epoch_train_acc += (yh.argmax(axis=1) == y.argmax(axis=1)).sum()

            # Slightly useful trick: start with low batch size, accelerate.
            trainer.batch_size = min(int(batch_size), max_batch_size)
            batch_size *= 1.001


if __name__ == '__main__':
    if 1:
        plac.call(main)
    else:
        import cProfile
        import pstats
        cProfile.runctx("plac.call(main)", globals(), locals(), "Profile.prof")
        s = pstats.Stats("Profile.prof")
        s.strip_dirs().sort_stats("time").print_stats(100)
Пример #54
0
        'walltime': args.walltime,
        'kineticsdatastore': args.kineticsdatastore
    }

    if args.profile:
        import cProfile, sys, pstats, os
        global_vars = {}
        local_vars = {
            'inputFile': inputFile,
            'output_dir': output_dir,
            'kwargs': kwargs,
            'RMG': RMG
        }

        command = """rmg = RMG(inputFile=inputFile, outputDirectory=output_dir); rmg.execute(**kwargs)"""

        stats_file = os.path.join(args.output_directory, 'RMG.profile')
        print("Running under cProfile")
        if not args.postprocess:
            # actually run the program!
            cProfile.runctx(command, global_vars, local_vars, stats_file)
        # postprocess the stats
        log_file = os.path.join(args.output_directory, 'RMG.log')
        processProfileStats(stats_file, log_file)
        makeProfileGraph(stats_file)

    else:

        rmg = RMG(inputFile=inputFile, outputDirectory=output_dir)
        rmg.execute(**kwargs)
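
`processProfileStats` and `makeProfileGraph` above are RMG's own post-processing helpers and their code is not shown here. As an assumption about what such post-processing typically involves (not RMG's actual implementation), the dumped `RMG.profile` file can be summarized with the standard library, including the callers of the hottest functions:

import pstats

# Top 25 entries by cumulative time, then who calls the costliest functions.
stats = pstats.Stats('RMG.profile')
stats.strip_dirs().sort_stats('cumulative').print_stats(25)
stats.print_callers(10)

A graphical call graph can then be rendered externally, for example with the third-party gprof2dot tool and Graphviz (`gprof2dot -f pstats RMG.profile | dot -Tpng -o RMG.png`), assuming both are installed.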
Пример #55
0
def message_processor(processor_pipe, shared_all_loci):
    cProfile.runctx("process_messages(processor_pipe, shared_all_loci)",
                    globals(), locals(), 'profile-processor.out')
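
With a fixed output name like 'profile-processor.out', several worker processes spawned from the same entry point would overwrite each other's profile. A small hypothetical variation (the same PID-suffix idea as the worker-init snippet earlier in this listing) keeps one file per process; `process_messages` below is a stand-in for the real message loop:

import cProfile
import os

def process_messages(processor_pipe, shared_all_loci):
    # Stand-in for the real message loop; for illustration only.
    return len(shared_all_loci)

def message_processor(processor_pipe, shared_all_loci):
    # One profile file per worker, keyed by PID, so concurrent workers
    # do not clobber each other's output.
    outfile = 'profile-processor-%d.out' % os.getpid()
    cProfile.runctx("process_messages(processor_pipe, shared_all_loci)",
                    globals(), locals(), outfile)
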
Пример #56
0
    def run(self, result=None):  # pylint: disable=g-bad-name
        """Run the test case.

    This code is basically the same as the standard library, except that when
    there is an exception, the --debug (NOTYPO) flag allows us to drop into the
    raising function for interactive inspection of the test failure.

    Args:
      result: The testResult object that we will use.
    """
        if result is None:
            result = self.defaultTestResult()
        result.startTest(self)
        testMethod = getattr(  # pylint: disable=g-bad-name
            self, self._testMethodName)
        try:
            try:
                self.setUp()
            except unittest.SkipTest:
                result.addSkip(self, sys.exc_info())
                result.stopTest(self)
                return
            except:
                # Break into interactive debugger on test failure.
                if flags.FLAGS.debug:
                    pdb.post_mortem()

                result.addError(self, sys.exc_info())
                # If the setup step failed we stop the entire test suite
                # immediately. This helps catch errors in the setUp() function.
                raise

            ok = False
            try:
                profile_filename = flags.FLAGS.profile
                if profile_filename:
                    cProfile.runctx("testMethod()", globals(), locals(),
                                    profile_filename)
                else:
                    testMethod()
                    # After-test checks are performed only if the test succeeds.
                    self.DoAfterTestCheck()

                ok = True
            except self.failureException:
                # Break into interactive debugger on test failure.
                if flags.FLAGS.debug:
                    pdb.post_mortem()

                result.addFailure(self, sys.exc_info())
            except KeyboardInterrupt:
                raise
            except unittest.SkipTest:
                result.addSkip(self, sys.exc_info())
            except Exception:  # pylint: disable=broad-except
                # Break into interactive debugger on test failure.
                if flags.FLAGS.debug:
                    pdb.post_mortem()

                result.addError(self, sys.exc_info())

            try:
                self.tearDown()
            except KeyboardInterrupt:
                raise
            except Exception:  # pylint: disable=broad-except
                # Break into interactive debugger on test failure.
                if flags.FLAGS.debug:
                    pdb.post_mortem()

                result.addError(self, sys.exc_info())
                ok = False

            if ok:
                result.addSuccess(self)
        finally:
            result.stopTest(self)
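
Because every test method above is profiled into the same `flags.FLAGS.profile` path, each test run overwrites the previous dump. If the runner were instead pointed at one file per test (not shown in the original), the standard `pstats` API can merge the per-test dumps into a single report; a minimal sketch with an assumed naming scheme:

import glob
import pstats

# Merge every per-test dump (assumed to live under test_profiles/) into one report.
prof_files = sorted(glob.glob("test_profiles/*.prof"))
if prof_files:
    merged = pstats.Stats(*prof_files)
    merged.strip_dirs().sort_stats("cumulative").print_stats(40)
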
Пример #57
0
import pstats, cProfile

import numpy as np

from npy_balltree import BallTree

def calc_neighbors(N=1000, D=2, k=10, leaf_size=20, filename='profile.out'):
    X = np.random.random((N,D))
    ball_tree = BallTree(X, leaf_size)
    dist, ind = ball_tree.query(X, k, return_distance=True)

cProfile.runctx("calc_neighbors()", globals(), locals(), "Profile.prof")

s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats()
Пример #58
0
 def __call__(self, *args):
     cProfile.runctx('self.f(self.f.__class__, *args)', globals(), locals())
Пример #59
0
def worker(file_location):

    cProfile.runctx('run_assignment(file_location)', globals(), locals(),
                    'di-fticr-di.prof')
Пример #60
0
def main():
    regex_pattern = re.compile(r"\x55\x8B\xEC.{4}\x05")
    yara_pattern = yara.compile(source=YARA_RULE)

    data_io = StringIO.StringIO()
    data_io.write(os.urandom(1024 * 1024 * 64))
    data = data_io.getvalue()

    import cProfile
    print "单条rule:"
    cProfile.runctx("test_regex(regex_pattern, data)", globals(), locals())
    cProfile.runctx("test_yara(yara_pattern, data)", globals(), locals())

    print "3条rule:"

    regex_pattern_multiple = [
        re.compile(r"\x55\x8B\xEC.{4}\x05"),
        re.compile(r"\x55\x8B\xEC.{4}\x00"),
        re.compile(r"\x55\x8B\xEC.{4}\x01")
    ]
    cProfile.runctx("test_regex_multiple(regex_pattern_multiple, data)",
                    globals(), locals())

    yara_pattern_multiple = yara.compile(source=YARA_RULE_MULTIPLE)
    cProfile.runctx("test_yara(yara_pattern_multiple, data)", globals(),
                    locals())

    print "单条rule 1000次:"

    data_io = StringIO.StringIO()
    data_io.write(os.urandom(1024 * 64))
    data = data_io.getvalue()

    cProfile.runctx("test_regex_many_times([regex_pattern], data)", globals(),
                    locals())
    cProfile.runctx("test_yara_many_times(yara_pattern, data)", globals(),
                    locals())

    print "3条rule 1000次:"

    cProfile.runctx("test_regex_many_times(regex_pattern_multiple, data)",
                    globals(), locals())
    cProfile.runctx("test_yara_many_times(yara_pattern_multiple, data)",
                    globals(), locals())
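
One caveat for micro-benchmarks like the regex-versus-yara comparison above: cProfile instruments every Python-level call, which inflates tight loops and can distort small timing differences between candidates. For a plain end-to-end comparison, the timeit module (already used for the enum benchmark earlier in this listing) avoids that overhead; a minimal sketch reusing the example's helper names, which are assumed to be importable from the benchmark module:

import timeit

# End-to-end timing without profiler instrumentation overhead.
setup = ("from __main__ import test_regex, test_yara, "
         "regex_pattern, yara_pattern, data")
regex_seconds = timeit.timeit("test_regex(regex_pattern, data)",
                              setup=setup, number=1000)
yara_seconds = timeit.timeit("test_yara(yara_pattern, data)",
                             setup=setup, number=1000)
print("regex: %.3fs  yara: %.3fs" % (regex_seconds, yara_seconds))
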