Example #1
def starfish():
    parser = build_parser()
    args, argv = parser.parse_known_args()

    art = """
         _              __ _     _
        | |            / _(_)   | |
     ___| |_ __ _ _ __| |_ _ ___| |__
    / __| __/ _` | '__|  _| / __| '_ \\
    \__ \ || (_| | |  | | | \__ \ | | |
    |___/\__\__,_|_|  |_| |_|___/_| |_|

    """
    print(art)
    if args.profile:
        profiler = cProfile.Profile()
        profiler.enable()

    if args.starfish_command is None:
        parser.print_help()
        parser.exit(status=2)
    args.starfish_command(args, len(argv) != 0)

    if args.profile:
        stats = Stats(profiler)
        stats.sort_stats('tottime').print_stats(PROFILER_LINES)
Example #2
def example_one():
    """
             20003 function calls in 0.778 seconds

   Ordered by: cumulative time

   ncalls  tottime  percall  cumtime  percall filename:lineno(function)
        1    0.000    0.000    0.778    0.778 item_58_profile.py:38(<lambda>)
        1    0.001    0.001    0.778    0.778 item_58_profile.py:19(insertion_sort)
    10000    0.764    0.000    0.777    0.000 item_58_profile.py:26(insert_value)
     9989    0.013    0.000    0.013    0.000 {method 'insert' of 'list' objects}
        1    0.000    0.000    0.000    0.000 {method 'disable' of '_lsprof.Profiler' objects}
       11    0.000    0.000    0.000    0.000 {method 'append' of 'list' objects}


    - notes for Nick:
        - we can see that the biggest use of CPU in our test is the cumulative
          time spent in the 'insert_value' function.
    """
    max_size = 10**4
    data = [randint(0, max_size) for _ in range(max_size)]
    test = lambda: insertion_sort(data)

    profiler = Profile()
    profiler.runcall(test)

    # to extract statistics about the 'test' function's performance, we use pstats
    stats = Stats(profiler)
    stats.strip_dirs()
    stats.sort_stats('cumulative')
    stats.print_stats()
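
The insertion_sort and insert_value helpers profiled by this example are not part of the excerpt. A minimal reconstruction that is consistent with the call counts in the listing above (the names come from the Stats output; the bodies are assumptions) might look like this:

# Hypothetical reconstruction of the profiled helpers; only the names and the
# call pattern are taken from the Stats output in the docstring above.
def insertion_sort(data):
    result = []
    for value in data:
        insert_value(result, value)
    return result


def insert_value(array, value):
    # Linear scan for the insertion point -- the hot spot that the profile
    # attributes most of the total time to.
    for i, existing in enumerate(array):
        if existing > value:
            array.insert(i, value)
            return
    array.append(value)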
Example #3
    def expose(self, widget, event):
        context = widget.window.cairo_create()
        #r = (event.area.x, event.area.y, event.area.width, event.area.height)
        #context.rectangle(r[0]-.5, r[1]-.5, r[2]+1, r[3]+1)
        #context.clip()

        if False:
            import profile
            profile.runctx("self.draw(context, event.area)", locals(),
                           globals(), "/tmp/pychessprofile")
            from pstats import Stats
            s = Stats("/tmp/pychessprofile")
            s.sort_stats('cumulative')
            s.print_stats()
        else:
            self.drawcount += 1
            start = time()
            self.animationLock.acquire()
            self.draw(context, event.area)
            self.animationLock.release()
            self.drawtime += time() - start
            #if self.drawcount % 100 == 0:
            #    print "Average FPS: %0.3f - %d / %d" % \
            #      (self.drawcount/self.drawtime, self.drawcount, self.drawtime)

        return False
Example #4
def example_two():
    """
             30003 function calls in 0.018 seconds

   Ordered by: cumulative time

   ncalls  tottime  percall  cumtime  percall filename:lineno(function)
        1    0.000    0.000    0.018    0.018 item_58_profile.py:98(<lambda>)
        1    0.001    0.001    0.018    0.018 item_58_profile.py:38(insertion_sort)
    10000    0.002    0.000    0.017    0.000 item_58_profile.py:88(insert_value_better)
    10000    0.012    0.000    0.012    0.000 {method 'insert' of 'list' objects}
    10000    0.003    0.000    0.003    0.000 {built-in method _bisect.bisect_left}
        1    0.000    0.000    0.000    0.000 {method 'disable' of '_lsprof.Profiler' objects}



    """
    max_size = 10**4
    data = [randint(0, max_size) for _ in range(max_size)]
    test = lambda: insertion_sort(data)

    profiler = Profile()
    profiler.runcall(test)

    # to extract statistics about the 'test' function's performance, we use pstats
    stats = Stats(profiler)
    stats.strip_dirs()
    stats.sort_stats('cumulative')
    stats.print_stats()
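
The listing above shows insert_value_better together with the built-in _bisect.bisect_left, which suggests the improved helper replaces the linear scan with a binary search. A hedged sketch of such a helper:

# Hypothetical sketch of the faster helper implied by the Stats output above.
from bisect import bisect_left


def insert_value_better(array, value):
    i = bisect_left(array, value)  # O(log n) search for the insertion point
    array.insert(i, value)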
Example #5
 def tearDown(self):
     """ """
     p = Stats(self.prof)
     p.sort_stats("cumtime")
     if self.verbose:
         p.dump_stats("profiles/test_graph.py.prof")
     p.strip_dirs()
Example #6
def profile():
    prof = Profile()
    prof.runcall(f1)
    stat = Stats(prof)
    stat.strip_dirs()
    stat.sort_stats('cumulative')
    stat.print_stats()
Example #7
    def __call__(self, environ, start_response):
        response_body = []

        def catching_start_response(status, headers, exc_info=None):
            start_response(status, headers, exc_info)
            return response_body.append

        def runapp():
            appiter = self._app(environ, catching_start_response)
            response_body.extend(appiter)
            if hasattr(appiter, 'close'):
                appiter.close()

        p = Profile()
        p.runcall(runapp)
        body = ''.join(response_body)
        stats = Stats(p)
        stats.sort_stats(*self._sort_by)

        self._stream.write('-' * 80)
        self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
        stats.print_stats(*self._restrictions)
        self._stream.write('-' * 80 + '\n\n')

        return [body]
Example #8
    def __init__(self):
        settings_manager = SettingsManager() # Set up the settings_manager

        max_workers = settings_manager.getint('application', 'max-workers') # Get the max workers from settings manager
        profiler_on = settings_manager.getint('debugging', 'profiler-on') # Get whether there is a profiler
        absolute = settings_manager.getint('save', 'absolute') # Get whether it's an absolute path
        save_path = settings_manager.get('save', 'path') # Get the save path
        if not absolute:
            save_path = PROJECT_PATH + os.path.sep + save_path

        executor = ThreadPoolExecutor(max_workers=max_workers, profiler_on=profiler_on) # Set up the thread executor
        dis = Disassembler(settings_manager) # Build the disassembler
        server = PyDAServer('0.0.0.0',9000) # Set up the PyDA server
        save_manager = SaveManager(save_path)

        if profiler_on:
            profile = Profile()
            profile.enable()

        app.build_and_run(settings_manager, dis, executor, server, save_manager) # Run the interface

        if profiler_on:
            profile.disable()
            stats = executor.getProfileStats()
            if stats is None:
                stats = Stats(profile)
            else:
                stats.add(profile)
            with open('profile.stats', 'wb') as statsfile:
                stats.stream = statsfile
                stats.sort_stats('cumulative').print_stats()
Example #9
 def __analyze2(self):
     import profile
     profile.runctx("self.__analyze2()", locals(), globals(), "/tmp/pychessprofile")
     from pstats import Stats
     s = Stats("/tmp/pychessprofile")
     s.sort_stats('cumulative')
     s.print_stats()
Example #10
 def tearDown(self):
     """Report profiling results"""
     p = Stats(self.pr)
     p.strip_dirs()
     p.sort_stats('cumtime')
     p.print_stats()
     print("\n--->>>")
Example #11
 def tearDown(self):
     if self.should_profile:
         results = Stats(self.profile)
         results.strip_dirs()
         results.sort_stats('cumulative')
         results.print_stats(50)
     super().tearDown()
Example #12
    def _execute(self, func, phase_name, n, *args):
        if not self.profile_dir:
            return func(*args)

        basename = '%s-%s-%d-%02d-%d' % (
            self.contender_name, phase_name, self.objects_per_txn, n, self.rep)
        txt_fn = os.path.join(self.profile_dir, basename + ".txt")
        prof_fn = os.path.join(self.profile_dir, basename + ".prof")

        profiler = cProfile.Profile()
        profiler.enable()
        try:
            res = func(*args)
        finally:
            profiler.disable()

        profiler.dump_stats(prof_fn)

        with open(txt_fn, 'w') as f:
            st = Stats(profiler, stream=f)
            st.strip_dirs()
            st.sort_stats('cumulative')
            st.print_stats()

        return res
Example #13
 def stopTest(self, test):
     super(BenchTestResult, self).stopTest(test)
     if self._benchmark:
         self._profiler.disable()
         stats = Stats(self._profiler)
         stats.sort_stats(self._sort)
         stats.print_stats(self._limit)
Example #14
 def stopTest(self, test):
     super(BenchTestResult, self).stopTest(test)
     if self._benchmark:
         self._profiler.disable()
         stats = Stats(self._profiler)
         stats.sort_stats(self._sort)
         stats.print_stats(self._limit)
Example #15
 def prof_wrapper(*args, **kwargs):
     with cProfile.Profile() as pr:
         value = profunc(*args, **kwargs)
     p = Stats(pr)
     p.sort_stats(SortKey.TIME).dump_stats(
         f"profiles/{profunc.__name__}_{args[0]}.prof")
     return value
Example #16
 def expose(self, widget, event):
     context = widget.window.cairo_create()
     #r = (event.area.x, event.area.y, event.area.width, event.area.height)
     #context.rectangle(r[0]-.5, r[1]-.5, r[2]+1, r[3]+1)
     #context.clip()
     
     if False:
         import profile
         profile.runctx("self.draw(context, event.area)", locals(), globals(), "/tmp/pychessprofile")
         from pstats import Stats
         s = Stats("/tmp/pychessprofile")
         s.sort_stats('cumulative')
         s.print_stats()
     else:
         self.drawcount += 1
         start = time()
         self.animationLock.acquire()
         self.draw(context, event.area)
         self.animationLock.release()
         self.drawtime += time() - start
         #if self.drawcount % 100 == 0:
         #    print "Average FPS: %0.3f - %d / %d" % \
         #      (self.drawcount/self.drawtime, self.drawcount, self.drawtime)
         
     return False
Example #17
    def __call__(self, environ, start_response):
        response_body = []

        def catching_start_response(status, headers, exc_info=None):
            start_response(status, headers, exc_info)
            return response_body.append

        def runapp():
            appiter = self._app(environ, catching_start_response)
            response_body.extend(appiter)
            if hasattr(appiter, 'close'):
                appiter.close()

        p = Profile()
        p.runcall(runapp)
        body = ''.join(response_body)
        stats = Stats(p, stream=self._stream)
        stats.sort_stats(*self._sort_by)

        self._stream.write('-' * 80)
        self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
        stats.print_stats(*self._restrictions)
        self._stream.write('-' * 80 + '\n\n')

        return [body]
Example #18
 def tearDown(self):
     if self.should_profile:
         results = Stats(self.profile)
         results.strip_dirs()
         results.sort_stats('cumulative')
         results.print_stats(50)
     super().tearDown()
Example #19
def main():
    """Main sequence"""
    analyser = Analyser(config=ProfilingConfig)
    data = import_all(config=ProfilingConfig)
    analyser.run(data)
    del analyser
    del data
    profiler = Profile()
    tracemalloc.start(10)
    time1 = tracemalloc.take_snapshot()
    profiler.runcall(test)
    time2 = tracemalloc.take_snapshot()
    time_stats = Stats(profiler)
    time_stats.strip_dirs()
    time_stats.sort_stats('cumulative')
    print("\n===Time Profiler Stats===\n")
    time_stats.print_stats(TOP_STATS)
    print("\n===Time Profiler Callers===\n")
    time_stats.print_callers(TOP_STATS)
    memory_stats = time2.compare_to(time1, 'lineno')
    print("\n===Memory Profiler Callers===\n")
    for stat in memory_stats[:3]:
        print(stat)
    print("\n===Top Memory Consumer===\n")
    top = memory_stats[0]
    print('\n'.join(top.traceback.format()))
Example #20
def profile(to=None, sort_by='cumtime'):
	'''Profiles a chunk of code, use with the ``with`` statement::
	
	    from halonctl.debug import profile
	    
	    with profile('~/Desktop/stats'):
	    	pass # Do something performance-critical here...
	
	Results for individual runs are collected into ``to``. The specifics of how
	reports are done varies depending on what type ``to`` is.
	
	* **File-like objects**: Stats are dumped, according to ``sort_by``, into the stream, separated by newlines - watch out, the file/buffer may grow very big when used in loops.
	* **List-like objects**: A number of pstats.Stats objects are appended.
	* **str and unicode**: Treated as a path and opened for appending. Tildes (~) will be expanded, and intermediary directories created if possible.
	* **None or omitted**: Results are printed to sys.stderr.
	'''
	
	if isinstance(to, six.string_types):
		to = open_fuzzy(to, 'a')
	
	to_is_stream = hasattr(to, 'write')
	to_is_list = hasattr(to, 'append')
	
	p = Profile()
	p.enable()
	yield
	p.disable()
	
	ps = Stats(p, stream=to if to_is_stream else sys.stderr)
	ps.sort_stats('cumtime')
	
	if to_is_stream or to is None:
		ps.print_stats()
	elif to_is_list:
		to.append(ps)
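
A minimal usage sketch of this helper, assuming profile() is wrapped with contextlib.contextmanager (implied by the yield and by the docstring's ``with`` example); the workload call is hypothetical:

# Collect Stats objects from several runs into a list-like target.
collected = []
with profile(collected):
    do_expensive_work()  # hypothetical workload

# Each entry is a pstats.Stats object; printing goes to the stream it was
# created with (sys.stderr in the list case).
collected[-1].print_stats(10)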
Example #21
 def tearDown(self):
     if PROFILE:
         p = Stats(self.pr)
         p.strip_dirs()
         p.sort_stats('cumtime')
         p.print_stats()
         if DEBUG:
             print('\n{}>>>'.format('-' * 77))
Example #22
 def wrap(*args, **kwargs):
     prof = Profile()
     res = prof.runcall(func, *args, **kwargs)
     stats = Stats(prof)
     stats.strip_dirs()
     stats.sort_stats('tottime')
     stats.print_stats(20)
     return res
Example #23
 def tearDown(self):
     """finish any test"""
     if hasattr(self, "prof"):
         p = Stats(self.prof)
         p.strip_dirs()
         p.sort_stats('cumtime')
         p.print_stats()
         print("\n--->>>")
Example #24
 def tearDown(self):
     """Disconnect from statseg"""
     self.stat.disconnect()
     profile = Stats(self.profile)
     profile.strip_dirs()
     profile.sort_stats("cumtime")
     profile.print_stats()
     print("\n--->>>")
Example #25
def print_stats(statsfile, statstext):
    with open(statstext, 'w') as f:
        mystats = Stats(statsfile, stream=f)
        mystats.strip_dirs()
        mystats.sort_stats('cumtime')
        # mystats.print_callers('_strptime')
        mystats.print_stats()
    startfile(statstext)
Example #26
 def tearDownClass(cls):
     if cls.is_running:
         return
     urlopen('http://localhost:8000/quit')
     cls.cli.close()
     p = Stats(cls.profiler)
     p.strip_dirs()
     p.sort_stats('cumtime')
Example #27
 def tearDown(self):
     '''Disconnect from statseg'''
     self.stat.disconnect()
     profile = Stats(self.profile)
     profile.strip_dirs()
     profile.sort_stats('cumtime')
     profile.print_stats()
     print("\n--->>>")
Example #28
 def tearDown(self):
     return
     """finish any test"""
     p = Stats(self.pr)
     p.strip_dirs()
     p.sort_stats('cumtime')
     p.print_stats()
     print("\n--->>>")
Example #29
 def tearDownClass(cls):
     if cls.is_running:
         return
     urlopen('http://localhost:8000/quit')
     cls.cli.close()
     p = Stats(cls.profiler)
     p.strip_dirs()
     p.sort_stats('cumtime')
Example #30
def search_method():
    """Match for applicable methods and their arguments.
    
    Input:
    * username: username.
    * theory_name: name of the theory.
    * thm_name: name of the theorem.

    Returns:
    * search_res: list of search results.
    * ctxt: current proof context.

    """
    data = json.loads(request.get_data().decode("utf-8"))

    if data['profile']:
        pr = cProfile.Profile()
        pr.enable()

    if not proof_cache.check_cache(data):
        start_time = time.perf_counter()
        proof_cache.create_cache(data)
        print("Load: %f" % (time.perf_counter() - start_time))

    if data['thm_name'] != '':
        limit = ('thm', data['thm_name'])
    else:
        limit = None
    basic.load_theory(data['theory_name'],
                      limit=limit,
                      username=data['username'])

    start_time = time.perf_counter()
    state = proof_cache.states[data['index']]
    fact_ids = data['step']['fact_ids']
    goal_id = data['step']['goal_id']

    search_res = state.search_method(goal_id, fact_ids)
    with settings.global_setting(unicode=True):
        for res in search_res:
            if '_goal' in res:
                res['_goal'] = [printer.print_term(t) for t in res['_goal']]
            if '_fact' in res:
                res['_fact'] = [printer.print_term(t) for t in res['_fact']]

    vars = state.get_vars(goal_id)
    with settings.global_setting(unicode=True, highlight=True):
        print_vars = dict((k, printer.print_type(v)) for k, v in vars.items())
    print("Response:", time.perf_counter() - start_time)

    if data['profile']:
        p = Stats(pr)
        p.strip_dirs()
        p.sort_stats('cumtime')
        p.print_stats()

    return jsonify({'search_res': search_res, 'ctxt': print_vars})
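
A hypothetical request payload for this handler, inferred only from the fields the code reads; every value below is illustrative:

payload = {
    'username': 'alice',              # illustrative value
    'theory_name': 'example_theory',  # illustrative value
    'thm_name': '',                   # empty string means no theorem limit
    'index': 0,
    'step': {'goal_id': 0, 'fact_ids': []},
    'profile': True,                  # enables the cProfile/Stats branch
}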
Example #31
def print_profile_data():
    """
    Print the collected profile data.
    """
    stream = StringIO()
    statistics = Stats(profiler, stream=stream)
    statistics.sort_stats('cumulative')
    statistics.print_stats()
    print(stream.getvalue())
Example #32
def home_p(request):
    """Profiled version of home"""
    prof = Profile()
    prof = prof.runctx("home(request)", globals(), locals())
    stream = StringIO()
    stats = Stats(prof, stream=stream)
    stats.sort_stats("time").print_stats(80)
    log.info("Profile data:\n%s", stream.getvalue())
    return HttpResponse(u"OK")
Example #33
def print_profile_data():
    """
    Print the collected profile data.
    """
    stream = StringIO()
    statistics = Stats(profiler, stream=stream)
    statistics.sort_stats('cumulative')
    statistics.print_stats()
    print(stream.getvalue())
Example #34
 def inner(*args, **kwargs):
     pro = Profile()
     pro.runcall(func, *args, **kwargs)
     stats = Stats(pro)
     stats.strip_dirs()
     stats.sort_stats(field)
     print("Profile for {}()".format(func.__name__))
     stats.print_stats()
     stats.print_callers()
Example #35
def run():
    if sys.version_info < (3, 0, 0):
        sys.stderr.write("You need python 3.0 or later to run this script\n")
        sys.exit(1)

    arg_parser = get_arg_parser()
    args = arg_parser.parse_args()

    if not args.p1_path and (not args.p1_draft or not args.p1_battle):
        arg_parser.error("You should use either p1-path or both "
                         "p1-draft and p1-battle.\n")
    elif not args.p2_path and (not args.p2_draft or not args.p2_battle):
        arg_parser.error("You should use either p2-path or both "
                         "p2-draft and p2-battle.\n")

    if args.p1_path is not None:
        player_1 = agents.NativeAgent(args.p1_path)
        player_1 = (player_1, player_1)
    else:
        player_1 = parse_draft_agent(args.p1_draft)(), \
                   parse_battle_agent(args.p1_battle)()

    if args.p2_path is not None:
        player_2 = agents.NativeAgent(args.p2_path)
        player_2 = (player_2, player_2)
    else:
        player_2 = parse_draft_agent(args.p2_draft)(), \
                   parse_battle_agent(args.p2_battle)()

    if args.profile:
        profiler = cProfile.Profile()
        result = io.StringIO()

        profiler.enable()

        for i in range(args.games):
            evaluate((i, player_1, player_2, args.seed))

        profiler.disable()

        profiler_stats = Stats(profiler, stream=result)

        profiler_stats.sort_stats('cumulative')
        profiler_stats.print_stats()

        print(result.getvalue())
    else:
        params = ((j, player_1, player_2, args.seed, args.silent)
                  for j in range(args.games))

        with Pool(args.processes) as pool:
            pool.map(evaluate, params)

    wins, games = wins_by_p0
    ratio = 100 * wins / games

    print(f"{'%.2f' % ratio}% {'%.2f' % (100 - ratio)}%")
Example #36
 def build_document(self, file_name):
     """This is the entry point for the NetcfBuilders from the ingestManager.
     These documents are id'd by fcstValidEpoch. The data section is an array,
     each element of which contains variable data and a station name. To process this
     file we need to iterate the document by recNum and process the station name along
     with all the other variables in the variableList.
     Args:
         file_name (str): the name of the file being processed
     Returns:
         [dict]: document
     """
     # noinspection PyBroadException
     try:
         # stash the file_name so that it can be used later
         self.file_name = os.path.basename(file_name)
         # pylint: disable=no-member
         self.ncdf_data_set = nc.Dataset(file_name)
         if len(self.station_names) == 0:
             result = self.cluster.query("""SELECT raw name FROM mdata
                 WHERE
                 type = 'MD'
                 AND docType = 'station'
                 AND subset = 'METAR'
                 AND version = 'V01';
             """)
             self.station_names = list(result)
         self.initialize_document_map()
         logging.info("%s building documents for file %s",
                      self.__class__.__name__, file_name)
         if self.do_profiling:
             with cProfile.Profile() as _pr:
                 self.handle_document()
                 with open("profiling_stats.txt", "w") as stream:
                     stats = Stats(_pr, stream=stream)
                     stats.strip_dirs()
                     stats.sort_stats("time")
                     stats.dump_stats("profiling_stats.prof")
                     stats.print_stats()
         else:
             self.handle_document()
         # pylint: disable=assignment-from-no-return
         document_map = self.get_document_map()
         data_file_id = self.create_data_file_id(file_name=file_name)
         data_file_doc = self.build_datafile_doc(
             file_name=file_name,
             data_file_id=data_file_id,
         )
         document_map[data_file_doc["id"]] = data_file_doc
         return document_map
     except Exception as _e:  # pylint:disable=broad-except
         logging.error(
             "%s: Exception with builder build_document: error: %s",
             self.__class__.__name__,
             str(_e),
         )
         return {}
Example #37
def profile_call(_func, *args, **kwargs):
    p = Profile()
    rv = []
    p.runcall(lambda: rv.append(_func(*args, **kwargs)))
    p.dump_stats('/tmp/sentry-%s-%s.prof' % (time.time(), _func.__name__))

    stats = Stats(p, stream=sys.stderr)
    stats.sort_stats('time', 'calls')
    stats.print_stats()
    return rv[0]
Example #38
 def tearDown(self):
     if not DEBUG_MODE:
         self.test_elasticity.collection.drop()
         self.test_tasks.collection.drop()
     if PROFILE_MODE:
         p = Stats(self.pr)
         p.strip_dirs()
         p.sort_stats('cumtime')
         p.print_stats()
         print("\n--->>>")
Example #39
def profile_call(_func, *args, **kwargs):
    p = Profile()
    rv = []
    p.runcall(lambda: rv.append(_func(*args, **kwargs)))
    p.dump_stats(f"/tmp/sentry-{time.time()}-{_func.__name__}.prof")

    stats = Stats(p, stream=sys.stderr)
    stats.sort_stats("time", "calls")
    stats.print_stats()
    return rv[0]
Example #40
 def tearDown(self):
     for worker in self.driver._workers:
         worker.stop()
         worker.wait()
     self.cvx.endpoint_data.clear()
     super(MechTestBase, self).tearDown()
     if ENABLE_PROFILER:
         p = Stats(self.pr)
         p.strip_dirs()
         p.sort_stats('cumtime')
         p.print_stats()
Example #41
 def wrapper(*args, **kwg):
     f = func
     res = None
     try:
         cProfile.runctx("res = f(*args, **kwg)", globals(), locals(), filename)
         return res
     finally:
         if filename:
             pstats = Stats(filename)
             pstats.sort_stats(*sort_fields)
             pstats.print_stats(*p_amount)
Example #42
def show_time_profiler_results(pr, top_records):
    """
    Show results of timed profiling.
    :param pr: profiler instance
    :param top_records: how many top function calls to show.
    """
    if pr:
        st = Stats(pr)
        st.strip_dirs()
        st.sort_stats('cumulative')
        st.print_stats(top_records)
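
A minimal usage sketch for show_time_profiler_results; the workload function is hypothetical:

import cProfile

pr = cProfile.Profile()
pr.enable()
run_workload()  # hypothetical function being measured
pr.disable()
show_time_profiler_results(pr, top_records=10)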
Example #43
def example_three():
    """
             20242 function calls in 0.063 seconds

   Ordered by: cumulative time

   ncalls  tottime  percall  cumtime  percall filename:lineno(function)
        1    0.000    0.000    0.063    0.063 item_58_profile.py:140(my_program)
       20    0.002    0.000    0.063    0.003 item_58_profile.py:130(first_func)
    20200    0.061    0.000    0.061    0.000 item_58_profile.py:124(my_utility)
       20    0.000    0.000    0.001    0.000 item_58_profile.py:135(second_func)
        1    0.000    0.000    0.000    0.000 {method 'disable' of '_lsprof.Profiler' objects}



    - notes for Nick:
        - you may profile your program only to find that a common utility
          function is responsible for the majority of the execution time.

        - the default output from the profiler doesn't show how the utility
          function is called by many different parts of your program.

        - 'my_utility' function is evidently the source of most execution time,
          but it is not immediately obvious why that function is called so many times.

    ---- Callers -----

    Ordered by: cumulative time

    Function                                          was called by...
                                                          ncalls  tottime  cumtime
    item_58_profile.py:140(my_program)                <-
    item_58_profile.py:130(first_func)                <-      20    0.002    0.062  item_58_profile.py:140(my_program)
    item_58_profile.py:124(my_utility)                <-   20000    0.061    0.061  item_58_profile.py:130(first_func)
                                                             200    0.001    0.001  item_58_profile.py:135(second_func)
    item_58_profile.py:135(second_func)               <-      20    0.000    0.001  item_58_profile.py:140(my_program)
    {method 'disable' of '_lsprof.Profiler' objects}  <-



    """
    profiler = Profile()
    profiler.runcall(my_program)

    # to extract statistics about the 'test' function's performance, we use pstats
    stats = Stats(profiler)
    stats.strip_dirs()
    stats.sort_stats('cumulative')
    stats.print_stats()

    print('\n---- Callers -----\n')

    # to see how many times a function is called
    stats.print_callers()
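
The program profiled here is not shown, but its shape can be inferred from the callers table in the docstring. A hypothetical reconstruction whose call counts match that output (the function bodies are placeholders):

# Hypothetical reconstruction; the call counts follow from the 'Callers' table
# above (20 calls each of first_func and second_func, 20200 calls of
# my_utility), and the bodies are placeholders.
def my_utility(a, b):
    c = 1
    for _ in range(100):
        c += a * b  # placeholder work


def first_func():
    for _ in range(1000):
        my_utility(4, 5)  # 20 calls x 1000 = 20000 calls


def second_func():
    for _ in range(10):
        my_utility(1, 3)  # 20 calls x 10 = 200 calls


def my_program():
    for _ in range(20):
        first_func()
        second_func()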
Example #44
def print_stats(profiler, printCallers=False):
    from pstats import Stats
    
    stats = Stats(profiler)
    stats.strip_dirs()
    stats.sort_stats('cumulative')
    
    if printCallers is True:
        stats.print_callers()
    else:    
        stats.print_stats()
Example #45
def show_time_profiler_results(pr, top_records):
    """
    Show results of timed profiling.
    :param pr: profiler instance
    :param top_records: how many top function calls to show.
    """
    if pr:
        st = Stats(pr)
        st.strip_dirs()
        st.sort_stats('cumulative')
        st.print_stats(top_records)
Example #46
def profile(func, args=None, kwargs=None, sort="time"):
    prof = profile_.Profile()
    if args is None:
        args = ()
    if kwargs is None:
        kwargs = {}
    ret = prof.runcall(func, *args, **kwargs)
    stats = Stats(prof)
    stats.sort_stats(sort)
    stats.print_stats()
    return ret
Example #47
    def save_data(self):
        try:
            import gprof2dot
            import pyprof2calltree
        except ImportError:
            msg = ('Unable to start profiling.\n Please either '
                   'disable performance profiling in settings.yaml or '
                   'install all modules listed in test-requirements.txt.')
            raise error.ProfilingError(msg)

        self.profiler.disable()
        elapsed = time.time() - self.start
        pref_filename = os.path.join(
            self.paths['last_performance_test'],
            '{method:s}.{handler_name:s}.{elapsed_time:.0f}ms.{t_time}.'.
            format(
                method=self.method,
                handler_name=self.handler_name or 'root',
                elapsed_time=elapsed * 1000.0,
                t_time=time.time()))
        tree_file = pref_filename + 'prof'
        stats_file = pref_filename + 'txt'
        callgraph_file = pref_filename + 'dot'

        # write pstats
        with file(stats_file, 'w') as file_o:
            stats = Stats(self.profiler, stream=file_o)
            stats.sort_stats('time', 'cumulative').print_stats()

        # write callgraph in dot format
        parser = gprof2dot.PstatsParser(self.profiler)

        def get_function_name(args):
            filename, line, name = args
            module = os.path.splitext(filename)[0]
            module_pieces = module.split(os.path.sep)
            return "{module:s}:{line:d}:{name:s}".format(
                module="/".join(module_pieces[-4:]),
                line=line,
                name=name)

        parser.get_function_name = get_function_name
        gprof = parser.parse()

        with open(callgraph_file, 'w') as file_o:
            dot = gprof2dot.DotWriter(file_o)
            theme = gprof2dot.TEMPERATURE_COLORMAP
            dot.graph(gprof, theme)

        # write calltree
        call_tree = pyprof2calltree.CalltreeConverter(stats)
        with file(tree_file, 'wb') as file_o:
            call_tree.output(file_o)
Example #48
 def tearDown(self):
     if ENABLE_PROFILE:
         if DUMP_PROFILE:
             self.pr.dump_stats('profile.out')
         p = Stats(self.pr)
         p.strip_dirs()
         p.sort_stats('time')
         p.print_stats(40)
         p.print_callees(r'types.py:846\(validate_value', 20)
         p.print_callees(r'types.py:828\(_validate_primitive_value', 20)
         p.print_callees(r'uploadsession.py:185\(write', 20)
     TestBase.teardown(self)
Example #49
def profiler(enable, outfile):
    try:
        if enable:
            profiler = Profile()
            profiler.enable()

        yield
    finally:
        if enable:
            profiler.disable()
            stats = Stats(profiler)
            stats.sort_stats('tottime')
            stats.dump_stats(outfile)
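
A minimal usage sketch, assuming profiler() is decorated with contextlib.contextmanager (implied by the yield inside try/finally); the workload call is hypothetical:

from pstats import Stats

with profiler(enable=True, outfile='run.prof'):
    run_benchmark()  # hypothetical workload

# The dumped file can be loaded back into pstats for inspection.
Stats('run.prof').sort_stats('tottime').print_stats(20)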
Example #50
    def tearDownClass(cls):

        # stop swarm
        print("TEST: stopping swarm")
        for node in cls.swarm:
            node.stop()
        shutil.rmtree(STORAGE_DIR)

        # get profiler stats
        stats = Stats(cls.profile)
        stats.strip_dirs()
        stats.sort_stats('cumtime')
        stats.print_stats()
Example #51
 def _call(self, *args, **kw):
     profile = RawProfile()
     def _run():
         with DisableGc():
             for _ in range(self._iterations):
                 _run.result = super(Profile, self)._call(*args, **kw)
     profile.runctx('_run()', {}, {'_run': _run})
     profile.create_stats()
     stats = Stats(profile)
     stats.sort_stats('cumulative')
     stats.fcn_list = stats.fcn_list[:self._max_lines]
     self._reporter(stats)
     return _run.result
Example #52
def get_profile_report(profiler):
    from pstats import Stats
    from cStringIO import StringIO

    io = StringIO()
    stats = Stats(profiler, stream = io)

    io.write('\nby cumulative time:\n\n')
    stats.sort_stats('cumulative').print_stats(25)

    io.write('\nby internal time:\n\n')
    stats.sort_stats('time').print_stats(25)

    return io.getvalue()
Example #53
def profile_func(func):
    from cProfile import Profile
    from pstats import Stats

    p = Profile()
    rv = []
    p.runcall(lambda: rv.append(func()))
    p.dump_stats('/tmp/lektor-%s.prof' % func.__name__)

    stats = Stats(p, stream=sys.stderr)
    stats.sort_stats('time', 'calls')
    stats.print_stats()

    return rv[0]
Example #54
 def run(self):
     """method calling cProfile and printing the output"""
     tests=self.tests()
    
     for test in tests:
         tmpBuffer=StringIO.StringIO()
         profile=cProfile.Profile()
         profile.runctx('self.'+str(test[0])+"()",globals(),locals())
         stats=Stats(profile,stream=tmpBuffer)
         stats.sort_stats('time','calls')
         stats.print_stats(1)
         match=re.findall(r'\bin\b(.*?)\bCPU\b',tmpBuffer.getvalue())
        
         print str(test[1].__doc__ )+":"+str(match[0])+" CPU Time"
Example #55
    def process_response(self, request, response):
        if self.is_on(request):
            self.prof.create_stats()
            out = StringIO.StringIO()
            stats = Stats(self.prof, stream=out)

            stats.sort_stats(*sort_tuple)

            stats.print_stats()

            stats_str = out.getvalue()

            if response and response.content and stats_str:
                response.content = "<pre>" + stats_str + "</pre>"
        return response
Example #56
def pytest_runtest_call(item):
    if SHOULD_PROFILE:
        p = Profile()
        p.enable()
        yield
        p.disable()
        stats = Stats(p)
        if SHOULD_PRINT:
            stats.sort_stats('cumulative').print_stats(50)
        if SHOULD_STORE:
            if not os.path.exists(BASEDIR):
                os.mkdir(BASEDIR)
            p.dump_stats(os.path.join(BASEDIR, '%s.pkl' % item.name))
    else:
        yield
Example #57
def profiling():

    from cProfile import Profile
    from pstats import Stats

    ir_params = {  'ev_params'      : {'space':'e3', 'F': [0.1,0.1,0.1], 'j_max':30},
                   'duration'       : 1,
                   'nu'             : 1.7e-5,
                   'sampling_rate'  : 8000,
                }

    p = Profile()
    p.runcall(lambda : get_ir(ir_params))
    stats = Stats(p, stream=sys.stdout)
    stats.sort_stats('time')
    stats.print_stats(10)
Example #58
def __profile_code():
    """
    @return: None
    @rtype: None

    Edit this function to do all profiling.
    """
    import cProfile
    from pstats import Stats
    profile_file = "C:\\Users\\PBS Biotech\\Documents\\Personal\\PBS_Office\\MSOffice\\officelib\\pbslib\\test\\profile.txt"
    cProfile.run('full_scan(manyfile3, manyrecipesteps)', filename=profile_file)
    with open("C:\\Users\\Public\\Documents\\PBSSS\\Functional Testing\\tpid.txt", 'w') as f:
        stats = Stats(profile_file, stream=f)
        # stats.strip_dirs()
        stats.sort_stats('time')
        stats.print_stats('MSOffice')