Example #1
import NN4 as file  # @UnusedImport
import cProfile  # @UnusedImport
import pstats

cProfile.run('file.main()', 'restats')
p = pstats.Stats('restats')
#p.strip_dirs().sort_stats('tottime').print_stats()
p.strip_dirs().sort_stats('time').print_stats(10)
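On Python 3.7+ the same report can be requested with the pstats.SortKey constants instead of the bare 'time' string; a minimal equivalent of the call above:

import pstats

p = pstats.Stats('restats')
p.strip_dirs().sort_stats(pstats.SortKey.TIME).print_stats(10)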
Example #2
    av = sys.argv[1:]
    if not av:
        main(av)
        sys.exit()
    firstarg = av[0].lower()
    if firstarg == "hotshot":
        import hotshot
        import hotshot.stats
        av = av[1:]
        prof_log_name = "XXXX.prof"
        prof = hotshot.Profile(prof_log_name)
        # benchtime, result = prof.runcall(main, *av)
        result = prof.runcall(main, *(av, ))
        print("result", repr(result))
        prof.close()
        stats = hotshot.stats.load(prof_log_name)
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(20)
    elif firstarg == "profile":
        import cProfile
        av = av[1:]
        cProfile.run('main(av)', 'YYYY.prof')
        import pstats
        p = pstats.Stats('YYYY.prof')
        p.strip_dirs().sort_stats('cumulative').print_stats(30)
    elif firstarg == "psyco":
        PSYCO = 1
        main(av[1:])
    else:
        main(av)
Example #3
    print(f"Loaded checkpoint file with {checkpoint['game_count']} games from {checkpoint['time']}")
  else:
    print(f"Training net from scratch...")
    net = Net()
    gen = 0

  start_time = time.perf_counter()
  [white, black] = [MLPPlayer(Color.White, net, True), MLPPlayer(Color.Black, net, True)]
  for i in range(0, args.games + 1):
    if i > 0 and i % args.checkpoint == 0:
      print(f"Checkpointing at {i}")
      checkpoint_path = os.path.join(net_directory(), f"net-{run_timestamp}-{i+gen:07d}.torch")
      write_checkpoint(checkpoint_path, net, i)

      if args.exhibition:
        run_exhib_match(net, BozoPlayer)
        run_exhib_match(net, DeltaPlayer)

    run_game(white, black, net)

  print(f"Finished, {(time.perf_counter() - start_time) / args.games}s per game")

  if args.profile:
    pr.disable()
    s = io.StringIO()
    ps = pstats.Stats(pr, stream=s).sort_stats(pstats.SortKey.TIME)
    ps.print_stats(0.2)
    print(s.getvalue())

  sys.exit(0)
Example #4
    T = 1
    l = 0.0

    EF = r['Ef'] * exp(-l)
    n = r["Np"] // 2
    bulle.calculer(T, l)
    return bulle


if __name__ == "__main__":
    import time
    t1 = time.time()
    cProfile.runctx("test()", globals(), locals(), "Profile.prof")

    s = pstats.Stats("Profile.prof")
    s.strip_dirs().sort_stats("time").print_stats()
    #bulle = test()
    # profile.run(
    #     'cb.valeursLoops(l, T, bulle.param, bulle.IC, bulle.IP, bulle.IPsusc)')
    # np.savez("test", C=bulle.IC, P=bulle.IP, P2=bulle.IPsusc)
    #s = np.load("test.npz")
    #IC = s["C"]
    # II = bulle.IP
    # n = bulle.IP.shape[0]
    # for i in range(-n, n):
    #     print(f"{i}\t{II[i, 0, 0]}")
    # print(f"sum = {np.sum(II[:, 0, 0])}")
    # print(f"temps exec : {time.time()-t1}")
    #print(f" IC0={bulle.IC[:, 0, 0]}")
    # Np = r['Np']
Example #5
@pytest.mark.parametrize('test_input, expected', multiple_namespaces)
def test_multiple_namespaces(test_input, expected):
    assert parsing.normalise(test_input) == expected


@pytest.mark.parametrize('test_input, expected', keywords_almost_in_typenames)
def test_keywords_almost_in_typenames(test_input, expected):
    assert parsing.normalise(test_input) == expected


def test_false_signatures():
    # This is an invalid function definition. Caused by a bug in Doxygen. See openbabel/src/ops.cpp : theOpCenter("center")
    from pyparsing import ParseException
    with pytest.raises(ParseException):
        parsing.normalise('("center")')


if __name__ == "__main__":
    try:
        import cProfile as profile
    except ImportError:
        import profile

    all_tests = arglists + varargs + multiple_qualifiers + functions + numbers_for_defaults + flags_in_defaults
    all_tests += all_tests + all_tests + all_tests + all_tests

    profile.runctx("for arglist in all_tests: parsing.normalise(arglist[0])", globals(), locals(), filename='parsing_profile')
    p = pstats.Stats('parsing_profile')
    p.strip_dirs().sort_stats('time', 'cumtime').print_stats(40)
Example #6
import pstats

p = pstats.Stats('Stats.out')
p.strip_dirs()  # remove extraneous path from all the module names

## To see the profile by cumulative time in a function
# p.sort_stats('cumulative') # sort profile by cumulative time in a function

## To see what functions were looping a lot, and taking a lot of time:
p.sort_stats('time')

## Print the first 10 lines of the output based on the above criteria
p.print_stats(10)
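The snippet above assumes a 'Stats.out' file already exists; a minimal sketch of producing one with cProfile (my_function is a hypothetical placeholder for the code being measured):

import cProfile

def my_function():
    # hypothetical workload; replace with the code to be profiled
    return sum(i * i for i in range(100000))

cProfile.run('my_function()', 'Stats.out')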
Example #7
def Profile():
    cProfile.run('ProfileTest()', 'profileout')
    p = pstats.Stats('profileout')
    p.sort_stats('cumulative').print_stats()
Example #8
                    first = 0  # first line flag
                    for line in output:  # run through ms output
                        if first == 0:  # if first line, split positions into list
                            line = line.rstrip().split(" ")
                            first = 1  # no longer first line, change flag
                        else:  # split string of 1s and 0s into list
                            line = list(line)

                        csvsim.writerow(line)  # output ms line to csv batch file
                        # This batch csv will have <snps> number of columns and 32 * (indvs + 1 blank) rows

    sim_time = time.time() - param_read_time
    print(f"Took {sim_time} seconds to perform simulations")

    end_time = time.time() - start_time
    print(f"ms_gen.py took {end_time} seconds overall")


if __name__ == '__main__':
    import cProfile, pstats
    profiler = cProfile.Profile()
    profiler.enable()
    main()
    profiler.disable()
    stats = pstats.Stats(profiler)
    stats.strip_dirs()
    stats.sort_stats('tottime')
    stats.print_stats()
    stats.dump_stats('./profiler/stats.prof')
Example #9
#!/usr/bin/env python
"""Helper script for looking at the scaling and profiling in parallel.

Give this script a number of processors and optionally a number of lines of
stats to print, e.g. ``python load_prof_parallel.py 24 40``
"""
import sys
import pstats as ps

import modred as mr


prof_path = 'lincomb_r%d.prof'

num_procs = 1
num_stats = 30

if len(sys.argv) == 2:
    num_procs = int(sys.argv[1])

if len(sys.argv) == 3:
    num_procs = int(sys.argv[1])
    num_stats = int(sys.argv[2])

stats = ps.Stats(prof_path%0)
for rank in mr.range(1, num_procs):
    stats.add(prof_path % rank)

print('\n----- Sum of all processors stats -----')
stats.strip_dirs().sort_stats('cumulative').print_stats(num_stats)
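The script above expects one profile file per processor (lincomb_r0.prof, lincomb_r1.prof, ...). A sketch of how such per-rank files might be written, assuming an MPI run with mpi4py (the rank source and file naming here are assumptions, not taken from modred):

import cProfile

from mpi4py import MPI  # assumed MPI binding; any source of a rank id works

rank = MPI.COMM_WORLD.Get_rank()
profiler = cProfile.Profile()
profiler.enable()
# ... the per-rank portion of the computation goes here ...
profiler.disable()
profiler.dump_stats('lincomb_r%d.prof' % rank)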
Example #10
# -*- coding: utf-8 -*-
import sys

if __name__ == "__main__":
    import cProfile
    import pytest  # NOQA
    import pstats

    script = sys.argv[1:] if len(sys.argv) > 1 else ["empty.py"]
    cProfile.run("pytest.cmdline.main(%r)" % script, "prof")
    p = pstats.Stats("prof")
    p.strip_dirs()
    p.sort_stats("cumulative")
    p.print_stats(500)
Example #11
def plan_actions(world,
                 use_constraints=False,
                 unit_costs=True,
                 max_time=300,
                 verbose=True,
                 **kwargs):
    # TODO: return multiple grasps instead of one
    pr = cProfile.Profile()
    pr.enable()
    with ClientSaver(world.client):
        # TODO: be careful about the table distance
        table_pose = get_pose(world.get_body(world.get_table()))
        torso_pose = get_link_pose(
            world.robot, link_from_name(world.robot, 'torso_lift_link'))
        torso_in_table = multiply(invert(table_pose), torso_pose)
        # Torso wrt table: [-0.6, 0.0, 0.33]
        print('Torso wrt table:',
              np.array(point_from_pose(torso_in_table)).round(3).tolist())
        #wait_for_interrupt()
        problem = get_pddlstream(world, **kwargs)
        p_success = 1e-2
        eager = True
        stream_info = {
            'ControlPoseCollision': FunctionInfo(p_success=p_success,
                                                 eager=eager),
            'ControlConfCollision': FunctionInfo(p_success=p_success,
                                                 eager=eager),
            'PosePoseCollision': FunctionInfo(p_success=p_success,
                                              eager=eager),
            'ConfConfCollision': FunctionInfo(p_success=p_success,
                                              eager=eager),
            'test-reachable': StreamInfo(p_success=0, eager=True),
            # TODO: these should automatically be last...
            'sample-motion': StreamInfo(p_success=1, overhead=100),
        }
        # TODO: RuntimeError: Preimage fact ('order', n0, t0) is not achievable!
        constraints = world.task.constraints if use_constraints else PlanConstraints(
        )
        solution = solve_focused(
            problem,
            planner='ff-wastar1',
            max_time=max_time,
            unit_costs=unit_costs,
            unit_efforts=True,
            effort_weight=1,
            stream_info=stream_info,
            # TODO: bug when max_skeletons=None and effort_weight != None
            max_skeletons=None,
            bind=True,
            max_failures=0,
            search_sample_ratio=0,
            constraints=constraints,
            verbose=verbose,
            debug=False)
        #solution = solve_incremental(problem, unit_costs=unit_costs, verbose=True)
        print_solution(solution)
        plan, cost, evaluations = solution
    pr.disable()
    if verbose:
        pstats.Stats(pr).sort_stats('tottime').print_stats(10)  # cumtime | tottime
    return plan
Example #12
def test():
    import profile, pstats
    #profile.run("simple_test()")
    profile.run('stacked_test()', 'barchart.prof')
    p = pstats.Stats('barchart.prof')
    p.sort_stats('cumulative').print_stats(20)
Example #13
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""

#end_pymotw_header
import cProfile as profile
import pstats
from profile_fibonacci_memoized import fib, fib_seq

# Create 5 sets of stats
for i in range(5):
    filename = 'profile_stats_{}.stats'.format(i)
    profile.run('print({}, fib_seq(20))'.format(i), filename)

# Read all 5 stats files into a single object
stats = pstats.Stats('profile_stats_0.stats')
for i in range(1, 5):
    stats.add('profile_stats_{}.stats'.format(i))

# Clean up filenames for the report
stats.strip_dirs()

# Sort the statistics by the cumulative time spent
# in the function
stats.sort_stats('cumulative')

stats.print_stats()
Example #14
################# Time Profiling #################

print("...Time Profiling...")
profiler = cProfile.Profile()
profiler.enable()
exec(open(filename).read())
profiler.disable()
s = io.StringIO()

# Output the results at the CLI:
#stats = pstats.Stats(profiler).sort_stats('ncalls')
#stats.print_stats()

# Write the results in a txt file
with open('time_profiling.txt', 'w+') as f:
    ps = pstats.Stats(profiler, stream=f)
    ps.sort_stats('ncalls')
    ps.print_stats()

# Open the txt file and read only the total time in first line
time_profiler = open(r'time_profiling.txt', "r")
time = 0
for i, line in enumerate(time_profiler):
    if i == 0:  #first line
        x = line.split()
        j = 0
        while True:
            if x[j] == "in":
                break
            else:
                j += 1
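The loop above is cut off right after it finds the token "in". Assuming the standard pstats header line ("N function calls (M primitive calls) in T seconds"), the total time is the token that follows, so a plausible (hypothetical) completion of the first-line branch is:

        time = float(x[j + 1])  # token after "in" is the total time in seconds
        break                   # only the header line is needed
print("total time:", time, "seconds")
time_profiler.close()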
Example #15
import cProfile, pstats, io
from azul.AzulPlayers import RandomPlayer
from azul.AzulArena import AzulArena
from azul.AzulGame import AzulGame

pr = cProfile.Profile()
pr.enable()

g = AzulGame(shouldRandomize=False)
rp1 = RandomPlayer(g).play
rp2 = RandomPlayer(g).play

for _ in range(100):
    arena = AzulArena(rp1, rp2, g, display=AzulGame.display)

    arena.playFullGame(verbose=False)

pr.disable()
s = io.StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
ps.print_stats()

with open('azulperf2.txt', 'w+') as f:
    f.write(s.getvalue())


Example #16
def run_command(command=None,
                parser=None,
                args=None,
                name='unknown',
                data=None,
                options=None):
    """
    Execute a function that processes command-line arguments and
    then calls a command-line driver.

    This function provides a generic facility for executing a command
    function.  It is segregated from the driver to enable profiling of the
    command-line execution.

    Required:
        command:    The function that will be executed to process the
                    command-line options with a parser object.
        parser:     The parser object that is used by the command-line function.

    Optional:
        options:    If this is not None, then ignore the args option and use
                    this to specify command options.
        args:       Command-line arguments that are parsed.  If this value is `None`, then the
                    arguments in `sys.argv` are used to parse the command-line.
        name:       Specifying the name of the command-line (for error messages).
        data:       A container of labeled data.

    Returned:
        retval:     Return values from the command-line execution.
        errorcode:  0 if Pyomo ran successfully
    """
    #
    #
    # Parse command-line options
    #
    #
    retval = None
    errorcode = 0
    if options is None:
        try:
            if type(args) is argparse.Namespace:
                _options = args
            else:
                _options = parser.parse_args(args=args)
            # Replace the parser options object with a pyutilib.misc.Options object
            options = pyutilib.misc.Options()
            for key in dir(_options):
                if key[0] != '_':
                    val = getattr(_options, key)
                    if not isinstance(val, types.MethodType):
                        options[key] = val
        except SystemExit:
            # the parser throws a system exit if "-h" is specified - catch
            # it to exit gracefully.
            return Container(retval=retval, errorcode=errorcode)
    #
    # Configure loggers
    #
    configure_loggers(options=options)
    #
    # Call the main Pyomo runner with profiling
    #
    TempfileManager.push()
    pcount = options.runtime.profile_count
    if pcount > 0:
        if not pstats_available:
            msg = "Cannot use the 'profile' option.  The Python 'pstats' "    \
                  'package cannot be imported!'
            configure_loggers(shutdown=True)
            raise ValueError(msg)
        tfile = TempfileManager.create_tempfile(suffix=".profile")
        tmp = profile.runctx(
            command.__name__ + '(options=options,parser=parser)',
            command.__globals__, locals(), tfile)
        p = pstats.Stats(tfile).strip_dirs()
        p.sort_stats('time', 'cumulative')
        p = p.print_stats(pcount)
        p.print_callers(pcount)
        p.print_callees(pcount)
        p = p.sort_stats('cumulative', 'calls')
        p.print_stats(pcount)
        p.print_callers(pcount)
        p.print_callees(pcount)
        p = p.sort_stats('calls')
        p.print_stats(pcount)
        p.print_callers(pcount)
        p.print_callees(pcount)
        retval = tmp
    else:
        #
        # Call the main Pyomo runner without profiling
        #
        TempfileManager.push()
        try:
            retval = command(options=options, parser=parser)
        except SystemExit:
            err = sys.exc_info()[1]
            #
            # If debugging is enabled or the 'catch' option is specified, then
            # exit.  Otherwise, print an "Exiting..." message.
            #
            if __debug__ and (options.runtime.logging == 'debug'
                              or options.runtime.catch_errors):
                configure_loggers(shutdown=True)
                sys.exit(0)
            print('Exiting %s: %s' % (name, str(err)))
            errorcode = err.code
        except Exception:
            err = sys.exc_info()[1]
            #
            # If debugging is enabled or the 'catch' option is specified, then
            # pass the exception up the chain (to pyomo_excepthook)
            #
            if __debug__ and (options.runtime.logging == 'debug'
                              or options.runtime.catch_errors):
                configure_loggers(shutdown=True)
                TempfileManager.pop(remove=not options.runtime.keep_files)
                raise

            if options.model is not None and options.model.save_file is not None:
                model = "model " + options.model.save_file
            else:
                model = "model"

            global filter_excepthook
            if filter_excepthook:
                action = "loading"
            else:
                action = "running"

            msg = "Unexpected exception while %s %s:\n    " % (action, model)
            #
            # This handles the case where the error is propagated by a KeyError.
            # KeyError likes to pass raw strings that don't handle newlines
            # (they translate "\n" to "\\n"), as well as tacking on single
            # quotes at either end of the error message. This undoes all that.
            #
            errStr = str(err)
            if type(err) == KeyError and errStr != "None":
                errStr = str(err).replace(r"\n", "\n")[1:-1]

            logger.error(msg + errStr)
            errorcode = 1

    configure_loggers(shutdown=True)

    if options.runtime.disable_gc:
        gc.enable()
    TempfileManager.pop(remove=not options.runtime.keep_files)
    return Container(retval=retval, errorcode=errorcode)
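The profiling branch above boils down to the following standalone pattern (a minimal sketch with a placeholder command function and output file, not Pyomo's actual driver):

import cProfile as profile
import pstats

def my_command():
    # hypothetical stand-in for the real command-line driver
    return sum(range(1000))

tfile = 'command.profile'
profile.runctx('my_command()', globals(), locals(), tfile)

p = pstats.Stats(tfile).strip_dirs()
p.sort_stats('time', 'cumulative').print_stats(20)
p.sort_stats('cumulative', 'calls').print_stats(20)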
Example #17
    if args.debug:
        pdb.run('reg.loadElementTree(tree)')
    else:
        startTimer(args.time)
        reg.loadElementTree(tree)
        endTimer(args.time, '* Time to parse ElementTree =')

    if args.validate:
        reg.validateGroups()

    if args.dump:
        write('* Dumping registry to regdump.txt', file=sys.stderr)
        reg.dumpReg(filehandle=open('regdump.txt', 'w', encoding='utf-8'))

    # create error/warning & diagnostic files
    errWarn = open(args.errfile, 'w',
                   encoding='utf-8') if args.errfile else sys.stderr
    diag = open(args.diagfile, 'w',
                encoding='utf-8') if args.diagfile else None

    if args.debug:
        pdb.run('genTarget(args)')
    elif args.profile:
        import cProfile
        import pstats
        cProfile.run('genTarget(args)', 'profile.txt')
        p = pstats.Stats('profile.txt')
        p.strip_dirs().sort_stats('time').print_stats(50)
    else:
        genTarget(args)
Example #18
import pstats

if __name__ == '__main__':
    '''
    Print some stats from python CPU profiling data files
    '''
    import optparse
    import sys
    parser = optparse.OptionParser(usage='%prog <profile.dat>')
    opt, args = parser.parse_args()

    if len(args) != 1:
        parser.print_help()
        sys.exit(-1)

    fn = args[0]
    st = pstats.Stats(fn)
    st = st.strip_dirs()
    st = st.sort_stats('cumulative')
    st.print_stats()
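The profile.dat consumed above can be produced on the command line (python -m cProfile -o profile.dat your_script.py) or programmatically; a minimal sketch of the latter with a placeholder workload:

import cProfile

profiler = cProfile.Profile()
profiler.enable()
sorted(range(100000), reverse=True)  # hypothetical workload to measure
profiler.disable()
profiler.dump_stats('profile.dat')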
Example #19
    ds = [rf(memory_file) for memory_file in memory_files]

def test_python_read_files():
    all_files = [open(fn, 'rb').read() for fn in filenames4]

if __name__ == "__main__":
    runs = ['datasets=test_full_read()',
            # 'test_partial()',
            # 'test_mem_read_full()',
            # 'test_mem_read_small()',
            'test_python_read_files()',
           ]
    random.shuffle(runs)
    for testrun in runs:
        cProfile.run(testrun, tempfile)
        p = pstats.Stats(tempfile)
        print "---------------"
        print testrun
        print "---------------"
        p.strip_dirs().sort_stats('time').print_stats(5)
    print "Confirming file read worked -- check for data elements near end"
    try:
        image_sizes = [len(ds.PixelData) for ds in datasets]
    except Exception, e:
        print "Failed to access dataset data for all files\nError:" + str(e)
    else:
        print "Reads checked ok."

    # Clear disk cache for next run?
    import sys
    if not on_windows:
Example #20
def execute(args, parser):
    import logging
    import os
    import sys

    import yaml

    from hexrd import config
    from hexrd.fitgrains import fit_grains

    # load the configuration settings
    cfgs = config.open(args.yml)

    # configure logging to the console:
    log_level = logging.DEBUG if args.debug else logging.INFO
    if args.quiet:
        log_level = logging.ERROR
    logger = logging.getLogger('hexrd')
    logger.setLevel(log_level)
    ch = logging.StreamHandler()
    ch.setLevel(logging.CRITICAL if args.quiet else log_level)
    cf = logging.Formatter('%(asctime)s - %(message)s', '%y-%m-%d %H:%M:%S')
    ch.setFormatter(cf)
    logger.addHandler(ch)

    # if find-orientations has not already been run, do so:
    quats_f = os.path.join(cfgs[0].working_dir, 'accepted_orientations.dat')
    if not os.path.exists(quats_f):
        logger.info("Missing %s, running find-orientations", quats_f)
        logger.removeHandler(ch)
        from . import findorientations
        findorientations.execute(args, parser)
        logger.addHandler(ch)

    logger.info('=== begin fit-grains ===')

    clobber = args.force or args.clean
    for cfg in cfgs:
        # prepare the analysis directory
        if os.path.exists(cfg.analysis_dir) and not clobber:
            logger.error(
                'Analysis "%s" at %s already exists.'
                ' Change yml file or specify "force"', cfg.analysis_name,
                cfg.analysis_dir)
            sys.exit()
        if not os.path.exists(cfg.analysis_dir):
            os.makedirs(cfg.analysis_dir)

        logger.info('*** begin analysis "%s" ***', cfg.analysis_name)

        # configure logging to file for this particular analysis
        logfile = os.path.join(cfg.working_dir, cfg.analysis_name,
                               'fit-grains.log')
        fh = logging.FileHandler(logfile, mode='w')
        fh.setLevel(log_level)
        ff = logging.Formatter('%(asctime)s - %(name)s - %(message)s',
                               '%m-%d %H:%M:%S')
        fh.setFormatter(ff)
        logger.info("logging to %s", logfile)
        logger.addHandler(fh)

        if args.profile:
            import cProfile as profile, pstats, io
            pr = profile.Profile()
            pr.enable()

        # process the data
        if args.grains is not None:
            args.grains = [int(i) for i in args.grains.split(',')]
        fit_grains(cfg,
                   force=args.force,
                   show_progress=not args.quiet,
                   ids_to_refine=args.grains,
                   clean=args.clean)

        if args.profile:
            pr.disable()
            s = io.StringIO()
            ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
            ps.print_stats(50)
            logger.info('%s', s.getvalue())

        # stop logging for this particular analysis
        fh.flush()
        fh.close()
        logger.removeHandler(fh)

        logger.info('*** end analysis "%s" ***', cfg.analysis_name)

    logger.info('=== end fit-grains ===')
    # stop logging to the console
    ch.flush()
    ch.close()
    logger.removeHandler(ch)
Example #21
def wrapper(*args, **kwargs):
    prof = cProfile.Profile()
    retval = prof.runcall(func, *args, **kwargs)
    ps = pstats.Stats(prof)
    ps.strip_dirs().sort_stats("time", "cumulative").print_stats(20)
    return retval
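The wrapper above is only the inner function of a profiling decorator; a sketch of a complete decorator it could belong to (the name profiled is an assumption, not from the original source):

import cProfile
import functools
import pstats


def profiled(func):
    """Hypothetical decorator: profile each call and print the 20 costliest entries."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        prof = cProfile.Profile()
        retval = prof.runcall(func, *args, **kwargs)
        ps = pstats.Stats(prof)
        ps.strip_dirs().sort_stats("time", "cumulative").print_stats(20)
        return retval
    return wrapper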
Example #22
import pstats
import sys

if len(sys.argv) < 3:
    print("Usage: run python -m cProfile -o restats python_script.py args")
    print(
        "then python pstats_cprof.py restats col_name(ncalls  tottime  percall  cumtime  percall)"
    )
    exit()

restats_file = sys.argv[1]
sort_col = sys.argv[2]

p = pstats.Stats(restats_file)
p.strip_dirs().sort_stats(sort_col).print_stats()
#p.print_callees('event_manager.py:15(events)')
Example #23
import cProfile
import time
import sys

p = cProfile.Profile()
p.enable()

try:
    start_main_import = time.time()
    from azure.cli.main import main
    end_main_import = time.time()
    main(sys.argv[1:])
    end_main = time.time()
finally:
    end_time = time.time()
    p.disable()
    from io import StringIO
    import pstats
    sio = StringIO()
    sortby = 'cumulative'
    ps = pstats.Stats(p, stream=sio).sort_stats(sortby)
    ps.print_stats(50)
    print(sio.getvalue())

Example #24
        :return: None
        """
        self.running = False


if len(sys.argv) > 1 and 'console' in sys.argv[1:]:
    # command line start
    if 'profile' in sys.argv[1:]:
        # start with profiling
        import cProfile
        import io
        import pstats

        pr = cProfile.Profile(builtins=False)
        pr.enable()
        Main()
        pr.disable()
        s = io.StringIO()
        sortby = 'cumulative'
        ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        ps.print_stats()
        print(s.getvalue())
    else:
        Main()
else:
    # Daemonize flowd aggregator
    daemon = Daemonize(app="flowd_aggregate",
                       pid='/var/run/flowd_aggregate.pid',
                       action=Main)
    daemon.start()
Example #25
def oai_profile():
    """
    Runs a benchmark
    """
    from six import StringIO
    oai_list_records_or_identifiers(StringIO(),
                                    argd={
                                        "metadataPrefix": "oai_dc",
                                        "verb": "ListRecords"
                                    })
    oai_list_records_or_identifiers(StringIO(),
                                    argd={
                                        "metadataPrefix": "marcxml",
                                        "verb": "ListRecords"
                                    })
    oai_list_records_or_identifiers(StringIO(),
                                    argd={
                                        "metadataPrefix": "oai_dc",
                                        "verb": "ListIdentifiers"
                                    })
    return


if __name__ == "__main__":
    import profile
    import pstats
    profile.run('oai_profile()', "oai_profile")
    p = pstats.Stats("oai_profile")
    p.strip_dirs().sort_stats("cumulative").print_stats()
Example #26
import cProfile
import pstats
from apps.onedimensional.thinfilm import convergence_test

if __name__ == "__main__":
    cProfile.run('convergence_test.single_run(1, 20)', 'stats')
    p = pstats.Stats('stats')
    p.strip_dirs()
    p.sort_stats(pstats.SortKey.CUMULATIVE).print_stats(20)
Example #27
        data = pd.read_table(opts.infile, dtype='string')
        if opts.verbose > 0: print('dropping duplicates...')
        data = data.drop_duplicates()
        if opts.verbose > 0: print('saving file...')
        data.to_csv(opts.infile, index=None, sep="\t", na_rep='', mode='w', line_terminator='\n')
        
    except Exception as e:
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + repr(e) + "\n")
        sys.stderr.write(indent + "  for help use --help")
        return 2


if __name__ == "__main__":
#     if DEBUG:
#         sys.argv.append("-h")
    if TESTRUN:
        import doctest
        doctest.testmod()
    if PROFILE:
        import cProfile
        import pstats
        profile_filename = 'edu.cornell.gobii.LGduplicates_profile.txt'
        cProfile.run('main()', profile_filename)
        statsfile = open("profile_stats.txt", "w")
        p = pstats.Stats(profile_filename, stream=statsfile)
        stats = p.strip_dirs().sort_stats('cumulative')
        stats.print_stats()
        statsfile.close()
        sys.exit(0)
    sys.exit(main())
Example #28
'''

from math import log
from timeit import Timer
import cProfile as profile
import pstats


def groffle_fast(mass, density):
    total = 0.0
    masslog = log(mass * density)
    for i in range(10000):
        total += masslog / (i + 1)
    return total


mass = 2.5
density = 12.0

timer_fast = Timer("total = groffle_fast(mass, density)",
                   "from __main__ import groffle_fast, mass, density")
print("groffle_fast time:", timer_fast.timeit(number=1000))
profile.run("groffle_fast(mass,density)", "groffleDataFast")
s = pstats.Stats("groffleDataFast")
s.print_stats()
'''
Slow Total:  33.28958002253529  Fast Total:  33.28958002253529
groffle_slow time: 3.785501196049154
groffle_fast time: 1.9862927800277248
'''
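The quoted output also mentions a groffle_slow variant that the excerpt omits; a plausible reconstruction (illustrative only, not the original code) recomputes the logarithm on every iteration, which yields the same total but roughly double the running time:

from math import log

def groffle_slow(mass, density):
    total = 0.0
    for i in range(10000):
        total += log(mass * density) / (i + 1)  # log recomputed on each pass
    return total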
Example #29
            print("%s datasets, %s blocks, %s bytes (%s TB), %s nevts, %s nlumis, cput %s, copies %s, %s" \
                    % (ndatasets, nblocks, size, teraBytes(size), nevts, nlumis, cput, ncopies, sites))
            # find out which site can serve given workflow request
            t0 = time.time()
            lheInput, primary, parent, secondary, allowedSites \
                    = getSiteWhiteList(wspec, siteInfo, reqSpecs)
            rdict = dict(name=wname, datasets=datasets, blocks=datasetBlocks,\
                    npileups=npileups, size=size, njobs=njobs,\
                    nevents=nevts, nlumis=nlumis, cput=cput, ncopies=ncopies,\
                    sites=sites, allowedSites=allowedSites, parent=parent,\
                    lheInput=lheInput, primary=primary, secondary=secondary)
            requests[wname] = rdict
            print("sites", allowedSites)
            elapsedTime(t0, "getSiteWhiteList")
    print("\ntotal # of workflows %s, datasets %s, blocks %s, evts %s, size %s (%s TB), cput %s (hours)" \
            % (len(winfo.keys()), len(datasets), totBlocks, totEvents, totSize, teraBytes(totSize), totCpuT))
    elapsedTime(tst0, 'workflows info')
    elapsedTime(orig)
    return requests


if __name__ == '__main__':
    import cProfile  # python profiler
    import pstats  # profiler statistics
    cmd = 'requestsInfo()'
    cProfile.runctx(cmd, globals(), locals(), 'profile.dat')
    stats = pstats.Stats('profile.dat')
    stats.sort_stats('cumulative')
    stats.print_stats()
#     requestsInfo()
Example #30
import myrandom
myrandom.seed(0)


def profrun():
    if l is not None:
        l.start(s)
    for _ in range(15000):
        if interrupter is not None:
            interrupter.step()
        else:
            s.step()


PROFMODE = True

if PROFMODE:
    try:
        import cProfile as profile
    except ImportError:
        import profile
    profile.run('profrun()', 'fooprof')
    s.print_report()

    import pstats
    pstats.Stats('fooprof').sort_stats('time').print_stats(40)

else:
    profrun()
    s.print_report()