Exemple #1
0
    output_file_w = options.output_filename_w or input_file + "_w.png"
    output_file_s = options.output_filename_s or input_file + "_s.jpg"

    args = (input_file, output_file_w, output_file_s, options.image_width,
            options.image_height, options.fft_size, progress_callback,
            options.color_scheme)

    print "processing file %s:\n\t" % input_file,

    if not options.profile:
        try:
            create_wave_images(*args)
        except AudioProcessingException as e:
            print "Error running wav2png: ", e
    else:
        from hotshot import stats
        import hotshot

        prof = hotshot.Profile("stats")
        prof.runcall(create_wave_images, *args)
        prof.close()

        print "\n---------- profiling information ----------\n"
        s = stats.load("stats")
        s.strip_dirs()
        s.sort_stats("time")
        s.print_stats(30)

    print
Exemple #2
0
    return ret

def _runcommand(ui, options, cmd, cmdfunc):
    def checkargs():
        try:
            return cmdfunc()
        except TypeError, inst:
            # was this an argument error?
            tb = traceback.extract_tb(sys.exc_info()[2])
            if len(tb) != 2: # no
                raise
            raise ParseError(cmd, _("invalid arguments"))

    if options['profile']:
        import hotshot, hotshot.stats
        prof = hotshot.Profile("hg.prof")
        try:
            try:
                return prof.runcall(checkargs)
            except:
                try:
                    ui.warn(_('exception raised - generating '
                             'profile anyway\n'))
                except:
                    pass
                raise
        finally:
            prof.close()
            stats = hotshot.stats.load("hg.prof")
            stats.strip_dirs()
            stats.sort_stats('time', 'calls')
                       (orig_dt*1e6))
        # third row
        if info.instrument.find("pigot") >= 0:
            instrument = "Spigot"
        else:
            instrument = info.instrument
        ppgplot.pgmtxt('T', -3.7, 0.02, 0.0, 'Instrument: %s'%instrument)
        if (info.bary):
            ppgplot.pgmtxt('T', -3.7, 0.33, 0.0, 'MJD\dbary\u: %.12f'%info.epoch)
        else:
            ppgplot.pgmtxt('T', -3.7, 0.33, 0.0, 'MJD\dtopo\u: %.12f'%info.epoch)
        ppgplot.pgmtxt('T', -3.7, 0.73, 0.0, 'Freq\dctr\u: %.1f MHz'%\
                       ((info.numchan/2-0.5)*info.chan_width+info.lofreq))
        ppgplot.pgiden()
        ppgplot.pgend()

if __name__ == '__main__':
    # Profiling is hard-disabled: both `if (0)` guards below are dead code.
    # Flip the outer guard to 1 to run main() under the legacy hotshot
    # profiler, and the inner one to print the collected statistics.
    if (0):
        # The following is for profiling
        import hotshot
        prof = hotshot.Profile("hotshot_edi_stats")
        prof.runcall(main)
        prof.close()
        # To see the results:
        if (0):
            from hotshot import stats
            s = stats.load("hotshot_edi_stats")
            s.sort_stats("time").print_stats()
    else:
        main()
Exemple #4
0
    object_num = 100
    object_attribute_rate = 0.3
    BR = BinaryRelation.BinaryRelation(object_num, attribute_num,
                                       object_attribute_rate)
    #print BR

    #BR = readBinaryRelation()
    # init Lattice
    myLattice = Lattice.Lattice(BR)
    del BR
    #start = time.time()
    #myLattice.doGodinAlgorithm()
    #for oneobject in BR.object_list:
    #    Lattice
    #save and output
    #usetime = time.time() - start

    #print myLattice
    #print 'the time is %.10f' % usetime
    #print '\nDone\n'
    #myLattice.doGodinAlgorithm()
    import hotshot, hotshot.stats
    prof = hotshot.Profile("lattice.prof", 1)
    prof.runcall(myLattice.doGodinAlgorithm)
    prof.close()
    print myLattice
    stats = hotshot.stats.load("lattice.prof")
    stats.strip_dirs()
    stats.sort_stats('time', 'calls')
    stats.print_stats(20)
Exemple #5
0
def main():
    """Command-line entry point: collect paths to post and hand them off.

    With --files, positional args are validated as files (and the option
    value is used as the post title); otherwise they are validated as
    directories.  Exits with status 1 when nothing valid was supplied.
    May run the actual posting under the hotshot profiler (--profile).
    """
    (options, args) = parseCmdLineOption()

    # Make sure at least one of the args exists
    postme = []
    post_title = None
    if options.files:
        # --files mode: args must be existing files.
        post_title = options.files
        for arg in args:
            if os.path.isfile(arg):
                postme.append(arg)
            else:
                print('ERROR: "%s" does not exist or is not a file!' % (arg))
    else:
        # Default mode: args must be existing directories.
        for arg in args:
            if os.path.isdir(arg):
                postme.append(arg)
            else:
                # BUGFIX: this branch validates directories, so the error
                # message must say "directory", not "file".
                print('ERROR: "%s" does not exist or is not a directory!' %
                      (arg))

    if not postme:
        print('ERROR: no valid arguments provided on command line!')
        sys.exit(1)

    # Parse our configuration file (explicit path or the default location).
    if options.config:
        conf = ParseConfig(options.config)
    else:
        conf = ParseConfig()

    # Resolve the newsgroup: a name without '.' is treated as an alias that
    # must exist in the config; otherwise use the name verbatim, or fall
    # back to the configured default group.
    if options.group:
        if '.' not in options.group:
            newsgroup = conf['aliases'].get(options.group)
            if not newsgroup:
                print('ERROR: group alias "%s" does not exist!' %
                      (options.group))
                sys.exit(1)
        else:
            newsgroup = options.group
    else:
        newsgroup = conf['posting']['default_group']

    # Strip whitespace from the newsgroup list to obey RFC1036
    for c in (' \t'):
        newsgroup = newsgroup.replace(c, '')

    # And off we go
    poster = PostMangler(conf, options.debug)

    if options.profile:
        # TODO: replace by cProfile (PY3 compatibility)
        import hotshot
        prof = hotshot.Profile('profile.poster')
        prof.runcall(poster.post, newsgroup, postme, post_title=post_title)
        prof.close()

        import hotshot.stats
        stats = hotshot.stats.load('profile.poster')
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(25)

    else:
        poster.post(newsgroup, postme, post_title=post_title)
Exemple #6
0
    print ' --> parse stage: %.4f ms' % ((time.clock() - start) * 1000)

    for output in template.generate():
        sys.stdout.write(output)
    print

    times = []
    for i in range(1000):
        start = time.clock()
        list(template.generate())
        times.append(time.clock() - start)
        sys.stdout.write('.')
        sys.stdout.flush()
    print

    print ' --> render stage: %s ms (average)' % (
        (sum(times) / len(times) * 1000))


if __name__ == '__main__':
    if '-p' in sys.argv:
        # Profile the benchmark with the legacy hotshot profiler.
        import hotshot, hotshot.stats
        prof = hotshot.Profile("template.prof")
        benchtime = prof.runcall(test)
        # BUGFIX: hotshot buffers its log file; close the profiler so the
        # data is flushed to disk before hotshot.stats tries to load it.
        prof.close()
        stats = hotshot.stats.load("template.prof")
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats()
    else:
        test()
Exemple #7
0
          "cvs2svn is using directory '%s' for temporary files, but\n"
          "  subdirectory '%s/cvs2svn.lock' exists, indicating that another\n"
          "  cvs2svn process is currently using '%s' as its temporary\n"
          "  workspace.  If you are certain that is not the case,\n"
          "  then remove the '%s/cvs2svn.lock' subdirectory."
          % (ctx.tmpdir, ctx.tmpdir, ctx.tmpdir, ctx.tmpdir,))
    raise

  try:
    if run_options.profiling:
      try:
        import cProfile
      except ImportError:
        # Old version of Python without cProfile.  Use hotshot instead.
        import hotshot
        prof = hotshot.Profile('cvs2svn.hotshot')
        prof.runcall(pass_manager.run, run_options)
        prof.close()
      else:
        # Recent version of Python (2.5+) with cProfile.
        def run_with_profiling():
          pass_manager.run(run_options)
        cProfile.runctx(
            'run_with_profiling()', globals(), locals(), 'cvs2svn.cProfile'
            )
    else:
      pass_manager.run(run_options)
  finally:
    try:
      os.rmdir(os.path.join(ctx.tmpdir, 'cvs2svn.lock'))
    except:
            if verbose:
                stats.print_stats()
            else:
                stats.print_stats(20)
        elif dokprofile:
            from cProfile import Profile
            import lsprofcalltree
            prof = Profile()
            prof.run('db.query_db(niter, dtype, onlyidxquery, onlynonidxquery, avoidfscache, verbose, inkernel)')
            kcg = lsprofcalltree.KCacheGrind(prof)
            ofile = open('indexed_search.kcg','w')
            kcg.output(ofile)
            ofile.close()
        elif doprofile:
            import hotshot, hotshot.stats
            prof = hotshot.Profile("indexed_search.prof")
            benchtime, stones = prof.run('db.query_db(niter, dtype, onlyidxquery, onlynonidxquery, avoidfscache, verbose, inkernel)')
            prof.close()
            stats = hotshot.stats.load("indexed_search.prof")
            stats.strip_dirs()
            stats.sort_stats('time', 'calls')
            stats.print_stats(20)
        else:
            db.query_db(niter, dtype, onlyidxquery, onlynonidxquery,
                        avoidfscache, verbose, inkernel)

    if repeatquery:
        # Start by a range which is almost None
        db.rng = [1, 1]
        if verbose:
            print "range:", db.rng
Exemple #9
0
# profile dolphy calls
import rundolphy
import hotshot

# Run the index benchmark under hotshot; raw profile data goes to dolphy.prof.
prof = hotshot.Profile("dolphy.prof")
# NOTE(review): runcall returns testIndex()'s return value; the 2-tuple
# unpack assumes testIndex returns (benchtime, stones) -- confirm.
benchtime, stones = prof.runcall(rundolphy.testIndex)
prof.close()
                    'image-classifier-%s' % opt.model),
                optimizer='sgd',
                optimizer_params={
                    'learning_rate': opt.lr,
                    'wd': opt.wd,
                    'momentum': opt.momentum
                },
                initializer=mx.init.Xavier(magnitude=2))
        mod.save_params('image-classifier-%s-%d-final.params' %
                        (opt.model, opt.epochs))
    else:
        if opt.mode == 'hybrid':
            net.hybridize()
        train(opt.epochs, context)


if __name__ == '__main__':
    if opt.profile:
        # Train under the legacy hotshot profiler and print the stats,
        # ordered by cumulative time.
        import hotshot, hotshot.stats
        profile_path = 'image-classifier-%s-%s.prof' % (opt.model, opt.mode)
        profiler = hotshot.Profile(profile_path)
        profiler.runcall(main)
        profiler.close()
        profile_stats = hotshot.stats.load(profile_path)
        profile_stats.strip_dirs()
        profile_stats.sort_stats('cumtime', 'calls')
        profile_stats.print_stats()
    else:
        main()
def Main(args):
    """Parses arguments and does the appropriate thing.

    Returns a process exit code: 0 on success, 2 on usage/argument errors.
    """
    util.ChangeStdoutEncoding()

    if sys.version_info < (2, 6):
        print "GRIT requires Python 2.6 or later."
        return 2
    elif not args or (len(args) == 1 and args[0] == 'help'):
        # No tool requested (or bare 'help'): show general usage.
        PrintUsage()
        return 0
    elif len(args) == 2 and args[0] == 'help':
        # 'help <tool>': print that tool's docstring.
        tool = args[1].lower()
        if not _GetToolInfo(tool):
            print "No such tool.  Try running 'grit help' for a list of tools."
            return 2

        print("Help for 'grit %s' (for general help, run 'grit help'):\n" %
              (tool))
        print _GetToolInfo(tool)[_FACTORY]().__doc__
        return 0
    else:
        options = Options()
        args = options.ReadOptions(args)  # args may be shorter after this
        if not args:
            print "No tool provided.  Try running 'grit help' for a list of tools."
            return 2
        tool = args[0]
        if not _GetToolInfo(tool):
            print "No such tool.  Try running 'grit help' for a list of tools."
            return 2

        # Tools that consume an input file need it to exist up front.
        try:
            if _GetToolInfo(tool)[_REQUIRES_INPUT]:
                os.stat(options.input)
        except OSError:
            print(
                'Input file %s not found.\n'
                'To specify a different input file:\n'
                '  1. Use the GRIT_INPUT environment variable.\n'
                '  2. Use the -i command-line option.  This overrides '
                'GRIT_INPUT.\n'
                '  3. Specify neither GRIT_INPUT or -i and GRIT will try to load '
                "'resource.grd'\n"
                '     from the current directory.' % options.input)
            return 2

        if options.psyco:
            # Psyco is a specializing JIT for Python.  Early tests indicate that it
            # could speed up GRIT (at the expense of more memory) for large GRIT
            # compilations.  See http://psyco.sourceforge.net/
            import psyco
            psyco.profile()

        if options.hash:
            grit.extern.FP.UseUnsignedFingerPrintFromModule(options.hash)

        # Instantiate and run the selected tool, optionally under hotshot
        # (profile log written to options.profile_dest).
        toolobject = _GetToolInfo(tool)[_FACTORY]()
        if options.profile_dest:
            import hotshot
            prof = hotshot.Profile(options.profile_dest)
            prof.runcall(toolobject.Run, options, args[1:])
        else:
            toolobject.Run(options, args[1:])
#!/usr/bin/env python

from examples.demo import main

if __name__ == "__main__":
    # Prefer cProfile/pstats; fall back to the legacy hotshot profiler when
    # they are unavailable.
    try:
        import cProfile
        import pstats
    except ImportError:
        # BUGFIX: only the *imports* are guarded now.  Previously the whole
        # cProfile run sat inside the try, so an ImportError raised from
        # within main() silently re-ran main() under hotshot.
        import hotshot
        import hotshot.stats

        prof = hotshot.Profile("demo-gaphas.prof")
        prof.runcall(main)
        prof.close()
        stats = hotshot.stats.load("demo-gaphas.prof")
        stats.strip_dirs()
        stats.sort_stats("time", "calls")
        stats.print_stats(20)
    else:
        cProfile.run("main()", "demo-gaphas.prof")
        p = pstats.Stats("demo-gaphas.prof")
        p.strip_dirs().sort_stats("time").print_stats(40)
Exemple #13
0
def start_profile(filename=None):
    """Create and return a hotshot profiler writing to *filename*.

    When no filename is given, one is derived from the current process id
    via PROFILE_LOG_FILENAME_PATTERN.
    """
    target = PROFILE_LOG_FILENAME_PATTERN % getpid() if filename is None else filename
    return hotshot.Profile(target)
Exemple #14
0
    a()


def a(count=5):
    """Count down from *count* to 0, announcing each step.

    Sleeps 50 ms between steps (so a profiler has something to measure)
    and always returns None.
    """
    while True:
        print('a', count)
        if not count:
            return None
        time.sleep(0.05)
        count -= 1


if __name__ == "__main__":
    import pprint
    # Statement executed under each profiler.  NOTE(review): x() is not
    # defined in the visible code (only a() is) -- confirm it exists at
    # module level, otherwise runctx will raise NameError.
    command = '''x()'''
    if hotshot:
        # hotshot path -- presumably `hotshot` is None when its import
        # failed earlier (Python 3); verify against the module top.
        profiler = hotshot.Profile("hotshot.profile",
                                   lineevents=True,
                                   linetimings=True)
        profiler.runctx(command, globals(), locals())
        print(dir(profiler))
        profiler.close()
        print('hotshot line events', profiler.lineevents)

    # Portable path: run the same command under cProfile and dump raw stats.
    profiler = cProfile.Profile(subcalls=True)
    profiler.runctx(command, globals(), locals())
    stats = profiler.getstats()
    profiler.dump_stats('cprofile.profile')

    # Optional third profiler; silently skipped when not installed.
    try:
        import line_profiler
    except ImportError as err:
        pass
Exemple #15
0
    def start(self):
        """Start the web UI server.

        Optionally daemonises (double fork), writes a pid file, drops
        privileges (group before user, so setgid is still permitted),
        configures the server from command-line options, and can run the
        whole server loop under the hotshot profiler.
        """
        super(Web, self).start()

        # Steps taken from http://www.faqs.org/faqs/unix-faq/programmer/faq/
        # Section 1.7
        if self.options.ensure_value("fork", None):
            # fork() so the parent can exit, returns control to the command line
            # or shell invoking the program.
            if os.fork():
                os._exit(0)

            # setsid() to become a process group and session group leader.
            os.setsid()

            # fork() again so the parent, (the session group leader), can exit.
            if os.fork():
                os._exit(0)

            # chdir() to ensure that our process doesn't keep any directory in
            # use that may prevent a filesystem unmount.
            import deluge.configmanager
            os.chdir(deluge.configmanager.get_config_dir())

        if self.options.pidfile:
            open(self.options.pidfile, "wb").write("%d\n" % os.getpid())

        if self.options.ensure_value("group", None):
            if not self.options.group.isdigit():
                import grp
                self.options.group = grp.getgrnam(self.options.group)[2]
            # BUGFIX: dropping the *group* must use setgid(); the original
            # called os.setuid() with a gid, changing the wrong id and making
            # the later setuid(user) fail for non-root users.
            os.setgid(self.options.group)
        if self.options.ensure_value("user", None):
            if not self.options.user.isdigit():
                import pwd
                self.options.user = pwd.getpwnam(self.options.user)[2]
            os.setuid(self.options.user)

        import server
        self.__server = server.DelugeWeb()

        if self.options.base:
            self.server.base = self.options.base

        if self.options.port:
            self.server.port = self.options.port

        if self.options.ensure_value("ssl", None):
            self.server.https = self.options.ssl

        if self.options.profile:
            # NOTE(review): deluge.configmanager is only imported in the fork
            # branch above; this path presumably relies on a module-level
            # import elsewhere in the file -- confirm.
            import hotshot
            hsp = hotshot.Profile(
                deluge.configmanager.get_config_dir("deluge-web.profile"))
            hsp.start()

        self.server.install_signal_handlers()
        self.server.start()

        # After the server loop exits, dump the 400 most expensive calls.
        if self.options.profile:
            hsp.stop()
            hsp.close()
            import hotshot.stats
            stats = hotshot.stats.load(
                deluge.configmanager.get_config_dir("deluge-web.profile"))
            stats.strip_dirs()
            stats.sort_stats("time", "calls")
            stats.print_stats(400)
                     default=False,
                     help="generate trips for OD estimation")
optParser.add_option("-f",
                     "--scale-factor",
                     dest="demandscale",
                     type="float",
                     default=1.,
                     help="scale demand by ")
optParser.add_option("-O",
                     "--output-dir",
                     dest="outputdir",
                     default=os.getcwd(),
                     help="define the output directory name and path")
(options, args) = optParser.parse_args()

if not options.netfile or not options.confile or not options.mtxpsfile:
    optParser.print_help()
    sys.exit()

if options.profile:
    # Run main() under hotshot and report the 20 most expensive functions.
    import hotshot
    import hotshot.stats
    statsFile = "hotshot_%s_stats" % options.type
    profiler = hotshot.Profile(statsFile)
    profiler.runcall(main)
    profiler.close()
    hotshot.stats.load(statsFile).strip_dirs().sort_stats(
        "time").print_stats(20)
else:
    main()
Exemple #17
0
def hotshotProfile():
    """Run bomberman.main under the hotshot profiler, logging to PROFILEFILE."""
    print "using hotshot"
    import hotshot
    prof = hotshot.Profile(PROFILEFILE)
    prof.runcall(bomberman.main)
    prof.close()
Exemple #18
0
    def run(self):
        """Launch Gaphor (or one of several developer sub-modes).

        Exactly one mode runs, selected by the first set attribute:
        ``command`` (exec a statement), ``doctest`` (run a module's
        doctests), ``unittest`` (run a unit-test file), ``file`` (exec a
        script), or -- by default -- start the Gaphor application itself,
        optionally under a profiler.  The doctest and unittest modes
        terminate the process via sys.exit().
        """
        print('Starting Gaphor...')

        if self.model:
            print('Starting with model file', self.model)

        # Run any prerequisite distutils sub-commands (e.g. build) first.
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)
            # if self.build_lib not in sys.path:
            # sys.path.insert(0, self.build_lib)

        # os.environ['GAPHOR_DATADIR'] = os.path.abspath('data')
        if self.coverage:
            import coverage
            coverage.start()

        if self.command:
            print('Executing command: %s...' % self.command)
            exec (self.command)

        elif self.doctest:
            print('Running doctest cases in module: %s...' % self.doctest)
            import imp
            # use zope's one since it handles coverage right
            from zope.testing import doctest

            # Figure out the file:
            f = os.path.join(*self.doctest.split('.')) + '.py'
            fp = open(f)
            # Prepend module's package path to sys.path
            pkg = os.path.join(self.build_lib, *self.doctest.split('.')[:-1])
            # if pkg:
            #    sys.path.insert(0, pkg)
            #    print 'Added', pkg, 'to sys.path'
            # Load the module as local module (without package)
            test_module = imp.load_source(self.doctest.split('.')[-1], f, fp)
            failure, tests = doctest.testmod(test_module, name=self.doctest,
                                             optionflags=doctest.ELLIPSIS + doctest.NORMALIZE_WHITESPACE)
            if self.coverage:
                print()
                print('Coverage report:')
                coverage.report(f)
            # Exit status 1 if any doctest failed, 0 otherwise.
            sys.exit(failure != 0)

        elif self.unittest:
            # Running a unit test is done by opening the unit test file
            # as a module and running the tests within that module.
            print('Running test cases in unittest file: %s...' % self.unittest)
            import imp, unittest
            fp = open(self.unittest)
            test_module = imp.load_source('gaphor_test', self.unittest, fp)
            test_suite = unittest.TestLoader().loadTestsFromModule(test_module)
            # test_suite = unittest.TestLoader().loadTestsFromName(self.unittest)
            test_runner = unittest.TextTestRunner(verbosity=self.verbosity)
            result = test_runner.run(test_suite)
            if self.coverage:
                print()
                print('Coverage report:')
                coverage.report(self.unittest)
            sys.exit(not result.wasSuccessful())

        elif self.file:
            print('Executing file: %s...' % self.file)
            dir, f = os.path.split(self.file)
            print('Extending PYTHONPATH with %s' % dir)
            # sys.path.append(dir)
            exec (compile(open(self.file).read(), self.file, 'exec'), {})
        else:
            print('Launching Gaphor...')
            # Clear argv so Gaphor's own CLI parsing sees no stray args.
            del sys.argv[1:]
            starter = load_entry_point('gaphor==%s' % (self.distribution.get_version(),), 'console_scripts', 'gaphor')

            if self.profile:
                print('Enabling profiling...')
                # Prefer cProfile/pstats; fall back to hotshot if unavailable.
                try:
                    import cProfile
                    import pstats
                    prof = cProfile.Profile()
                    prof.runcall(starter)
                    prof.dump_stats('gaphor.prof')
                    p = pstats.Stats('gaphor.prof')
                    p.strip_dirs().sort_stats('time').print_stats(20)
                except ImportError as ex:
                    import hotshot, hotshot.stats
                    prof = hotshot.Profile('gaphor.prof')
                    prof.runcall(starter)
                    prof.close()
                    stats = hotshot.stats.load('gaphor.prof')
                    stats.strip_dirs()
                    stats.sort_stats('time', 'calls')
                    stats.print_stats(20)
            else:
                starter()
Exemple #19
0
            Usage()
    runDetails.tableName = extra[0]

if __name__ == '__main__':
    if len(sys.argv) < 2:
        Usage()

    # Record the full command line and parse arguments into _runDetails.
    _runDetails.cmd = ' '.join(sys.argv)
    SetDefaults(_runDetails)
    ParseArgs(_runDetails)

    ShowVersion(includeArgs=1)

    if _runDetails.nRuns > 1:
        # Multiple runs: announce and execute each one in turn.
        for i in range(_runDetails.nRuns):
            sys.stderr.write(
                '---------------------------------\n\tDoing %d of %d\n---------------------------------\n'
                % (i + 1, _runDetails.nRuns))
            RunIt(_runDetails)
    else:
        if _runDetails.profileIt:
            import hotshot, hotshot.stats
            prof = hotshot.Profile('prof.dat')
            prof.runcall(RunIt, _runDetails)
            # BUGFIX: hotshot buffers its log; close() before loading the
            # stats, otherwise the profile data may be truncated.
            prof.close()
            stats = hotshot.stats.load('prof.dat')
            stats.strip_dirs()
            stats.sort_stats('time', 'calls')
            stats.print_stats(30)
        else:
            RunIt(_runDetails)
Exemple #20
0
def main(n, maxTime=1.0e20):
    """Configure the simulation globals and run the Qt event loop.

    *n* is the particle count; RADIUS shrinks as n grows so the scene
    stays roughly the same size.  *maxTime* bounds the simulation time.
    """
    global N, RADIUS, MAXTIME
    N = n
    RADIUS = 15 / n**.5
    MAXTIME = maxTime
    application = QApplication(sys.argv)
    jello_widget = Jello()
    jello_widget.app = application
    application.setMainWidget(jello_widget)
    jello_widget.show()
    jello_widget.update()
    application.exec_loop()

if __name__ == "__main__":
    if PROFILING:
        # Profile a short fixed run (n=4, 30 s cap) under hotshot, then
        # print the 20 most time-consuming functions and exit.
        prof = hotshot.Profile("jello.prof")
        def m():
            main(4, maxTime=30.0)
        prof.runcall(m)
        prof.close()
        print 'Profiling run is finished, figuring out stats'
        stats = hotshot.stats.load("jello.prof")
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(20)
        sys.exit(0)
    # Normal run: particle count from argv[1], defaulting to 10 on any
    # parse failure or missing argument.
    try:
        n = string.atoi(sys.argv[1])
    except:
        n = 10
    main(n)
Exemple #21
0
    import pdb
    __appversion__ = "0.02a"
    print "Bitcoin trade simulator profiler v%s" % __appversion__

    print " -- this is a test script to profile the performance of bct.py"
    print " -- the trade results should be ignored as the trade strategy inputs"
    print " are designed to stress the module with many trade positions"
    print ""
    print "Profiling bct...(This is going to take a while)"
    #open the history file
    f = open("./datafeed/bcfeed_mtgoxUSD_1min.csv", 'r')
    d = f.readlines()
    f.close()

    import hotshot, hotshot.stats
    prof = hotshot.Profile("bct.prof")

    te = prof.runcall(test)
    prof.close()
    stats = hotshot.stats.load("bct.prof")
    stats.strip_dirs()
    stats.sort_stats('time', 'calls')
    stats.print_stats(20)

    print "Score:", te.score()
    print "Closing Balance:", te.balance
    print "Transaction Count: ", len(te.positions)

    #Commented out the follwing reports -- they generate very large files and in the case of this test script of limited use.
    #print "Generating reports..."
    #te.log_transactions('./report/profile_transactions.csv')
Exemple #22
0
def startup_script(main_globals):
    """
    This is the main startup script for NE1.
    It is intended to be run only once, and only by the code in main.py.
    When this function returns, the caller is intended to immediately exit
    normally.
       Parameter main_globals should be the value of globals() in __main__,
    which is needed in case .atom-debug-rc is executed, since it must be
    executed in that global namespace.
    """

    # Note: importing all of NE1's functionality can take a long time.
    # To the extent possible, we want that time to be spent after
    # something is visible to the user, but (mostly) before the main
    # window is shown to the user (since showing the main window implies
    # that NE1 is almost ready to go). So we display a splashscreen
    # before doing most imports and initializations, then set up most
    # of our data structures and UI commands (thus importing the code
    # needed to implement them), and then show the main window.
    # (Some experimental commands are initialized after that, so that
    # errors that occur then can't prevent the main window from becoming
    # visible.)

    # TODO: turn the sections of code below into named functions or methods,
    # and perhaps split before_most_imports and before_creating_app into
    # more named functions or methods. The biggest split should be between
    # functions that need to be careful to do very few or no imports,
    # and functions that are free to do any imports.

    # Windows machines spawn and remove the shell, so no info is normally
    # captured.  This is a first attempt to try to capture some of the console
    # prints that would normally be lost.  The default for this code is that
    # it's turned off, and should remain that way until it's improved.
    # NOTE(review): the bare except clauses throughout this section are
    # deliberate best-effort behavior — failure to set up console logging
    # must never prevent NE1 from starting.
    if NE1_Build_Constants.NE1_CONSOLE_REDIRECT and os.name == "nt":
        capture_console = False
        capture_file = ""
        # if it's not reporting as python is the executable
        if not sys.executable.upper().endswith("PYTHON.EXE") and \
           not sys.executable.upper().endswith("PYTHON"):
            try:
                capture_file = u"".join((sys.executable[:-4], "_console.log"))
                sys.stdout = open(capture_file, 'w')
                capture_console = True  # already trapped, don't try more.
            except:
                pass
        if not capture_console:
            # Haven't captured the console log yet.  Find the default user
            # path and try to capture there this happens if we can't write to
            # the normal log location, or if python.exe is the executable.
            tmpFilePath = os.path.normpath(os.path.expanduser("~/Nanorex/"))
            if not os.path.exists(tmpFilePath):  #If it doesn't exist
                try:
                    os.mkdir(tmpFilePath)  #Try making one
                    capture_console = True
                except:
                    pass
                    # we tried, but there's no easy way to capture the console
            if capture_console or os.path.isdir(tmpFilePath):
                try:  # We made the directory or it already existed, try
                    # creating the log file.
                    capture_file = os.path.normpath(u"".join((tmpFilePath, \
                                             "/NE1_console.log")))
                    sys.stdout = open(capture_file, 'w')
                    capture_console = True
                except:
                    print >> sys.__stderr__, \
                          "Failed to create any console log file."
                    capture_console = False
        if capture_console:
            # Next two lines are specifically printed to the original console
            print >> sys.__stdout__, "The console has been redirected into:"
            print >> sys.__stdout__, capture_file.encode("utf_8")
            print
            print "starting NanoEngineer-1 in [%s]," % os.getcwd(
            ), time.asctime()
            print "using Python: " + sys.version
            try:
                print "on path: " + sys.executable
            except:
                pass

    # print the version information including official release candidate if it
    # is not 0 (false)
    if NE1_Build_Constants.NE1_OFFICIAL_RELEASE_CANDIDATE:
        print "Version: NanoEngineer-1 v%s_RC%s" % \
              (NE1_Build_Constants.NE1_RELEASE_VERSION, \
               NE1_Build_Constants.NE1_OFFICIAL_RELEASE_CANDIDATE)
    else:
        print "Version: NanoEngineer-1 v%s" % \
              NE1_Build_Constants.NE1_RELEASE_VERSION

    # "Do things that should be done before most imports occur."

    startup_before_most_imports.before_most_imports(main_globals)

    from PyQt4.Qt import QApplication, QSplashScreen

    # "Do things that should be done before creating the application object."

    startup_before_most_imports.before_creating_app()
    ### TODO: this imports undo, env, debug, and it got moved earlier
    # in the startup process at some point. Those imports are probably not
    # too likely to pull in a lot of others, but if possible we should put up
    # the splash screen before doing most of them. Sometime try to figure out
    # how to do that. The point of this function is mostly to wrap every signal->slot
    # connection -- maybe it's sufficient to do that before creating the main
    # window rather than before creating the app? [bruce 071008 comment]

    # do some imports used for putting up splashscreen

    # (this must be done before any code that loads images from cad/src/ui)
    import utilities.icon_utilities as icon_utilities
    icon_utilities.initialize_icon_utilities()

    # Create the application object (an instance of QApplication).
    QApplication.setColorSpec(QApplication.CustomColor)
    #russ 080505: Make it global so it can be run under debugging below.
    global app
    app = QApplication(sys.argv)

    # Put up the splashscreen (if its image file can be found in cad/images).
    #
    # Note for developers:
    # If you don't want the splashscreen, just rename the splash image file.

    splash_pixmap = icon_utilities.imagename_to_pixmap("images/splash.png")
    # splash_pixmap will be null if the image file was not found
    if not splash_pixmap.isNull():
        splash = QSplashScreen(splash_pixmap)  # create the splashscreen
        splash.show()
        MINIMUM_SPLASH_TIME = 3.0
        # I intend to add a user pref for MINIMUM_SPLASH_TIME for A7. mark 060131.
        splash_start = time.time()
    else:
        print "note: splash.png was not found"

    # connect the lastWindowClosed signal

    from PyQt4.Qt import SIGNAL
    app.connect(app, SIGNAL("lastWindowClosed ()"), app.quit)

    # NOTE: At this point, it is ok to do arbitrary imports as needed,
    # except of experimental code.

    # import MWsemantics.

    # An old comment (I don't know if it's still true -- bruce 071008):
    # this might have side effects other than defining things.

    from ne1_ui.MWsemantics import MWsemantics

    # initialize modules and data structures

    from ne1_startup import startup_misc
    # do this here, not earlier, so it's free to do whatever toplevel imports it wants
    # [bruce 071008 change]

    startup_misc.call_module_init_functions()

    startup_misc.register_MMP_RecordParsers()
    # do this before reading any mmp files

    # create the single main window object

    foo = MWsemantics(
    )  # This does a lot of initialization (in MainWindow.__init__)

    import __main__
    __main__.foo = foo
    # developers often access the main window object using __main__.foo when debugging,
    # so this is explicitly supported

    # initialize CoNTubGenerator
    # TODO: move this into one of the other initialization functions
    #Disabling the following code that initializes the ConTub plugin
    #(in UI it is called Heterojunction.) The Heterojunction generator or
    #ConTubGenerator was never ported to Qt4 platform. The plugin generator
    #needs a code cleanup  -- ninad 2007-11-16
    ##import CoNTubGenerator
    ##CoNTubGenerator.initialize()

    # for developers: run a hook function that .atom-debug-rc might have defined
    # in this module's global namespace, for doing things *before* showing the
    # main window.

    try:
        # do this, if user asked us to by defining it in .atom-debug-rc
        func = atom_debug_pre_main_show
    except NameError:
        pass
    else:
        func()

    # Do other things that should be done just before showing the main window

    startup_misc.pre_main_show(
        foo)  # this sets foo's geometry, among other things

    foo._init_after_geometry_is_set()

    if not splash_pixmap.isNull():
        # If the MINIMUM_SPLASH_TIME duration has not expired, sleep for a moment.
        while time.time() - splash_start < MINIMUM_SPLASH_TIME:
            time.sleep(0.1)
        splash.finish(foo)  # Take away the splashscreen

    # show the main window

    foo.show()

    # for developers: run a hook function that .atom-debug-rc might have defined
    # in this module's global namespace, for doing things *after* showing the
    # main window.

    try:
        # do this, if user asked us to by defining it in .atom-debug-rc
        func = atom_debug_post_main_show
    except NameError:
        pass
    else:
        func()

    # do other things after showing the main window
    startup_misc.post_main_show(foo)

    # start psyco runtime optimizer (EXPERIMENTAL) --
    # for doc see http://psyco.sourceforge.net/
    #
    # Example: it speeds up code like this by 17 times:
    # (in my test, Intel Mac OS 10.4, Python 2.4.4)
    #   x = 17
    #   for i in range(10**7):
    #       x += i % 3 - 1
    #
    #  [bruce 080524]
    from utilities.debug_prefs import debug_pref, Choice_boolean_False
    if debug_pref("Use psyco runtime optimizer (next session)?",
                  Choice_boolean_False,
                  prefs_key=True):
        # Import Psyco if available
        try:
            import psyco
            ## psyco.full() -- insert dna takes a lot of time, then segfaults
            # after printing "inside this what's this";
            # plan: be more conservative about what it should optimize...
            # preferably bind specific functions using psyco.bind().
            # For now, just tell it to only optimize the most important ones.
            psyco.log()  # manual says: log file name looks like xxx.log-psyco
            # by default, where xxx is the name of the script you ran
            # (when I ran "python main.py" in cad/src, it wrote to main.log-psyco there)
            # (maybe we can pass our own pathname as an argument?)
            ## psyco.profile(0.2) # use profiling, optimize funcs that use
            # more than 20% of the time (not sure what that means exactly)
            # (seems safe, but from log file, i guess it doesn't do much)
            psyco.profile(0.05)  # "aggressive"
            print "using psyco"
            pass
        except ImportError:
            print "not using psyco"
            pass
        pass

    # Decide whether to do profiling, and if so, with which
    # profiling command and into what file. Set local variables
    # to record the decision, which are used later when running
    # the Qt event loop.

    # If the user's .atom-debug-rc specifies PROFILE_WITH_HOTSHOT = True,
    # use hotshot, otherwise fall back to vanilla Python profiler.
    # (Note: to work, it probably has to import this module
    #  and set this variable in this module's namespace.)
    try:
        PROFILE_WITH_HOTSHOT
    except NameError:
        PROFILE_WITH_HOTSHOT = False

    try:
        # user can set atom_debug_profile_filename to a filename in .atom-debug-rc,
        # to enable profiling into that file. For example:
        # % cd
        # % cat > .atom-debug-rc
        # atom_debug_profile_filename = '/tmp/profile-output'
        # ^D
        # ... then run NE1, and quit it
        # ... then in a python shell:
        # import pstats
        # p = pstats.Stats('<filename>')
        # p.strip_dirs().sort_stats('time').print_stats(100) # order by internal time (top 100 functions)
        # p.strip_dirs().sort_stats('cumulative').print_stats(100) # order by cumulative time
        atom_debug_profile_filename = main_globals.get(
            'atom_debug_profile_filename')
        if atom_debug_profile_filename:
            print("\nUser's .atom-debug-rc requests profiling into file %r" %
                  (atom_debug_profile_filename, ))
            if not type(atom_debug_profile_filename) in [
                    type("x"), type(u"x")
            ]:
                print "error: atom_debug_profile_filename must be a string"
                assert 0  # caught and ignored, turns off profiling
            if PROFILE_WITH_HOTSHOT:
                try:
                    import hotshot
                except:
                    print "error during 'import hotshot'"
                    raise  # caught and ignored, turns off profiling
            else:
                try:
                    import cProfile as py_Profile
                except ImportError:
                    print "Unable to import cProfile. Using profile module instead."
                    py_Profile = None
                if py_Profile is None:
                    try:
                        import profile as py_Profile
                    except:
                        print "error during 'import profile'"
                        raise  # caught and ignored, turns off profiling
    except:
        # Any failure above (bad type, missing profiler module) disables
        # profiling for this session rather than aborting startup.
        print "exception setting up profiling (hopefully reported above); running without profiling"
        atom_debug_profile_filename = None

    # Create a fake "current exception", to help with debugging
    # (in case it's shown inappropriately in a later traceback).
    # One time this is seen is if a developer inserts a call to print_compact_traceback
    # when no exception is being handled (instead of the intended print_compact_stack).
    try:
        assert 0, "if you see this exception in a traceback, it is from the" \
            " startup script called by main.py, not the code that printed the traceback"
    except:
        pass

    # Handle a mmp file passed to it via the command line.  The mmp file
    # must be the first argument (after the program name) found on the
    # command line.  All other arguments are currently ignored and only
    # one mmp file can be loaded from the command line.
    # old revision with --initial-file is at: svn rev 12759
    # Derrick 20080520
    if ((len(sys.argv) >= 2) and sys.argv[1].endswith(".mmp")):
        foo.fileOpen(sys.argv[1])

    # Do other post-startup, pre-event-loop, non-profiled things, if any
    # (such as run optional startup commands for debugging).
    startup_misc.just_before_event_loop()

    if os.environ.has_key('WINGDB_ACTIVE'):
        # Hack to burn some Python bytecode periodically so Wing's
        # debugger can remain responsive while free-running
        # [from http://wingware.com/doc/howtos/pyqt; added by bruce 081227]
        # Addendum [bruce 090107]: this timer doesn't noticeably slow down NE1,
        # but with or without it, NE1 is about 4x slower in Wing than running
        # alone, at least when running test_selection_redraw.py.
        print "running under Wing IDE debugger; setting up timer"
        from PyQt4 import QtCore
        timer = QtCore.QTimer()

        def donothing(*args):
            x = 0
            for i in range(0, 100):
                x += i

        timer.connect(timer, QtCore.SIGNAL("timeout()"), donothing)
        timer.start(200)

    # Finally, run the main Qt event loop --
    # perhaps with profiling, depending on local variables set above.
    # This does not normally return until the user asks NE1 to exit.

    # Note that there are three copies of the statement which runs that loop,
    # two inside string literals, all of which presumably should be the same.

    if atom_debug_profile_filename:
        if PROFILE_WITH_HOTSHOT:
            profile = hotshot.Profile(atom_debug_profile_filename)
            profile.run('app.exec_()')
        else:
            py_Profile.run(
                'from ne1_startup.main_startup import app; app.exec_()',
                atom_debug_profile_filename)
        print("\nProfile data was presumably saved into %r" %
              (atom_debug_profile_filename, ))
    else:
        # if you change this code, also change both string literals just above
        app.exec_()

    # Now return to the caller in order to do a normal immediate exit of NE1.

    return  # from startup_script
Exemple #23
0
 def process_request(self, request):
     """Start hotshot profiling for this request when DEBUG is enabled and
     the query string contains a 'prof' parameter; stores the profiler and
     its backing temp file on the middleware instance."""
     # 'in' replaces the deprecated dict.has_key(); identical semantics.
     if settings.DEBUG and 'prof' in request.GET:
         self.tmpfile = tempfile.NamedTemporaryFile()
         self.prof = hotshot.Profile(self.tmpfile.name)
Exemple #24
0
 def process_request(self, request):
     """When profiling is enabled in settings, attach a hotshot profiler
     (backed by a fresh temp-file path) to this middleware instance."""
     if not settings.PROFILER:
         return
     # NOTE(review): tempfile.mktemp() is race-prone; mkstemp would be
     # safer — confirm before changing, since hotshot needs a path.
     self.tmpfile = tempfile.mktemp()
     self.prof = hotshot.Profile(self.tmpfile)
Exemple #25
0
    # Axis limits from the histogram edges (assumes xedge/yedge are
    # array-likes with .min()/.max() — TODO confirm, def is outside this view).
    view_x = (xedge.min(), xedge.max())
    view_y = (yedge.min(), yedge.max())

    print('making multiples', end=' ')
    p.multiples.flat[0].axis(view_x + view_y)
    # Encode grid name, start time, resolution and frame duration into the name.
    filename = '%s-%s_%s_%05.2fkm_%05.1fs.%s' % (
        filename_prefix, grid_name, start_time.strftime('%Y%m%d_%H%M%S'), dx,
        time_delta.seconds, image_type)
    filename = os.path.join(outpath, filename)
    if do_save:
        fig.savefig(filename, dpi=150)

    return fig, p, frame_start_times, filename

    # NOTE(review): unreachable — this statement follows the return above.
    print(' ... done')

if __name__ == '__main__':
    # Flip to True to profile runtest() with hotshot instead of running it.
    do_profile = False
    if not do_profile:
        import sys
        res = runtest(sys.argv[1], sys.argv[2])
    else:
        import hotshot
        from hotshot import stats
        profiler = hotshot.Profile("multiples_test_profile")
        profiler.runcall(runtest)
        profiler.close()
        profile_stats = stats.load("multiples_test_profile")
        profile_stats.sort_stats("time").print_stats()
def profile(func, *args):
    """Profile a single call of func(*args) with hotshot.

    Writes profile data to /tmp/my.profile and returns whatever func
    returns (the original discarded it). The profiler is closed even if
    func raises, so the data file is always flushed.
    """
    import hotshot
    output = "/tmp/my.profile"
    p = hotshot.Profile(output)
    try:
        return p.runcall(func, *args)
    finally:
        # Always close so the profile file is valid even on exceptions.
        p.close()
Exemple #27
0
 def process_request(self, request):
     """Attach a hotshot profiler to this middleware instance when 'prof'
     is in the query string and the caller is allowed to profile
     (DEBUG mode, or a superuser)."""
     may_profile = settings.DEBUG or request.user.is_superuser
     if may_profile and 'prof' in request.GET:
         self.tmpfile = tempfile.mktemp()
         self.prof = hotshot.Profile(self.tmpfile)
Exemple #28
0
 def profiler(self):
     """Return a fresh hotshot profiler writing to this instance's temp file."""
     profile_path = self.tmpfile.name
     return hotshot.Profile(profile_path)
Exemple #29
0
                              "unreachable objects")
                if not options.suppress_timing:
                    t1 = time.time()
                    print("\ncommand took %.2f seconds\n" % (t1 - t0, ))

        return None

    # Dispatch on the first command-line argument: "hotshot" or "profile"
    # run main() under the respective profiler; no arguments runs it plain.
    av = sys.argv[1:]
    if not av:
        # No arguments: run main() directly and exit with its status.
        # (Originally execution fell through to av[0] below and raised
        # IndexError on the empty list.)
        sys.exit(main(av))
    firstarg = av[0].lower()
    if firstarg == "hotshot":
        import hotshot, hotshot.stats
        av = av[1:]
        prof_log_name = "XXXX.prof"
        prof = hotshot.Profile(prof_log_name)
        # benchtime, result = prof.runcall(main, *av)
        result = prof.runcall(main, *(av, ))
        print("result", repr(result))
        prof.close()
        stats = hotshot.stats.load(prof_log_name)
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(20)
    elif firstarg == "profile":
        import cProfile
        av = av[1:]
        cProfile.run('main(av)', 'YYYY.prof')
        import pstats
        p = pstats.Stats('YYYY.prof')
        p.strip_dirs().sort_stats('cumulative').print_stats(30)
    # NOTE(review): any other first argument silently does nothing —
    # presumably intentional, but confirm against the original tool.
# Make a choice
# Create a file block on two test storage elements and name it after the
# number of files this benchmark will insert (HOW_MANY_FILES is defined
# earlier, outside this fragment — TODO confirm).
block = DbsFileBlock(StorageElement=['test1', 'test3'], )

block['Name'] = "/test_primary_001/TestProcessedDS001/GEN-SIM#12345-" + str(
    HOW_MANY_FILES)
# Register the block via the DBS API before timing the per-file inserts.
print "Inserting Files Into", api.insertBlock(proc, block)
#print "Wait........"
try:
    # Profile each single-file insertion separately, accumulating the
    # per-call wall time so we can report total, mean and median below.
    each_call = []
    time_taken = 0.0
    for i in range(HOW_MANY_FILES):
        # uuidgen gives each logical file name a unique suffix so inserts
        # never collide; it also names the per-call profile dump.
        rnd = str(os.popen('uuidgen').readline().strip())
        myfile1['LogicalFileName'] = 'NEW_TEST' + rnd
        #print myfile1['LogicalFileName']
        prf = rnd + '.prof'
        p = hotshot.Profile(prf)
        #Insert in a Block
        out = p.run("api.insertFiles (proc, [myfile1], block)")
        stats = hotshot.stats.load(prf)
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        #stats.print_stats(1)
        # total_tt is hotshot's total time for the profiled call.
        time_taken += stats.total_tt
        each_call.append(stats.total_tt)

    print "\nTotal Time Taken: ", time_taken, "seconds"
    print "\nTime Per File Insertion: ", time_taken / HOW_MANY_FILES, "seconds\n"
    each_call.sort()
    # NOTE(review): for an odd number of samples this averages the middle
    # element with its right neighbor; a true median would use indices
    # (n-1)//2 and n//2. Confirm intent before changing the report.
    med = (each_call[((len(each_call) - 1) / 2)] + each_call[(
        (len(each_call) + 1) / 2)]) / 2
    print "\nMEDIAN:  %s " % str(med)