Example #1
0
def test_given_a_text_column_when_profiler_is_applied_with_high_level_analysis_then_it_finishes_quick(
):
    """Line-profile grammar_check_score over generated rows, persist the
    profile report, and fail if total execution exceeds the time budget."""
    # given: report folder, line profiler, input rows, and the budget
    os.makedirs(TARGET_PROFILE_REPORT_FOLDER, exist_ok=True)
    profiler = LineProfiler()
    rows = generate_data()
    budget_seconds = 4  # benchmarked: (first-time) 46.694923639297485, (cached) 5.918392 seconds

    # when: using default method (joblib Parallel) for parallelisation
    started_at = time()
    profiled_scorer = profiler(grammar_check_score)
    for row in rows:
        profiled_scorer(row)
    elapsed_seconds = time() - started_at

    # persist the per-line report (text) and the raw stats (binary)
    timestamp = datetime.now().strftime("%d-%m-%Y-%H-%M-%S")
    sha = shorten_sha(git_current_head_sha())
    report_basename = f'{TARGET_PROFILE_REPORT_FOLDER}/grammar_check_score-{timestamp}-{sha}'
    with open(f'{report_basename}.txt', 'w') as report_file, redirect_stdout(report_file):
        profiler.print_stats()
    profiler.dump_stats(f'{report_basename}.lprof')

    # then
    assert elapsed_seconds <= budget_seconds, \
        f"Expected duration: {budget_seconds}, Actual duration: {elapsed_seconds}. " \
        f"Slow down by: {abs(elapsed_seconds - budget_seconds)} seconds. " \
        f"We have crossed the benchmark limit after a speed up via commit 51a8952."
def test_given_a_text_column_when_profiler_is_applied_with_high_level_analysis_then_it_finishes_quick():
    """Line-profile spelling_quality_score over generated rows, write the
    profile report to disk, and fail when execution exceeds the benchmark budget.
    """
    # given
    TARGET_PROFILE_REPORT_FOLDER = '.cprofile/'
    if not os.path.exists(TARGET_PROFILE_REPORT_FOLDER):
        os.makedirs(TARGET_PROFILE_REPORT_FOLDER)
    profile = LineProfiler()
    source_data = generate_data()
    expected_execution_time = 32  # benchmarked: (first-time) 31.051079034805298, (cached) 0.918392 seconds

    # when: using default method (joblib Parallel) for parallelisation
    start_execution_time = time()
    profile_wrapper = profile(spelling_quality_score)
    for each in source_data:
        profile_wrapper(each)
    end_execution_time = time()
    actual_execution_time = end_execution_time - start_execution_time

    short_sha = shorten_sha(git_current_head_sha())
    output_filename = f'{TARGET_PROFILE_REPORT_FOLDER}/spelling_quality_check-' \
                      f'{datetime.now().strftime("%d-%m-%Y-%H-%M-%S")}-{short_sha}'
    with open(f'{output_filename}.txt', 'w') as file:
        with redirect_stdout(file):
            profile.print_stats()

    profile.dump_stats(f'{output_filename}.lprof')

    # then
    # NOTE: each message segment ends with a space so the implicitly
    # concatenated f-strings read correctly; wording matches the sibling
    # benchmark tests in this file.
    assert actual_execution_time <= expected_execution_time, \
        f"Expected duration: {expected_execution_time}, Actual duration: {actual_execution_time}. " \
        f"Slow down by: {abs(actual_execution_time - expected_execution_time)} seconds. " \
        f"We have crossed the benchmark limit after a speed up via commit a81ed70."
Example #3
0
def assert_benchmark(expected_execution_time: float, target_function,
                     profile_filename_prefix: str, commit_id: str):
    """Line-profile ``target_function`` over generated rows, persist the
    profile report, and fail when the run is slower than the budget.

    The text report and binary stats are written under
    TARGET_PROFILE_REPORT_FOLDER, named after ``profile_filename_prefix``,
    the current timestamp and the short git SHA.
    """
    # given: report folder, profiler, and input rows
    os.makedirs(TARGET_PROFILE_REPORT_FOLDER, exist_ok=True)
    profiler = LineProfiler()
    rows = generate_data()

    # when: using default method (joblib Parallel) for parallelisation
    started_at = time()
    wrapped = profiler(target_function)
    for row in rows:
        wrapped(row)
    elapsed = time() - started_at

    timestamp = datetime.now().strftime("%d-%m-%Y-%H-%M-%S")
    sha = shorten_sha(git_current_head_sha())
    report_basename = f'{TARGET_PROFILE_REPORT_FOLDER}/{profile_filename_prefix}-{timestamp}-{sha}'
    with open(f'{report_basename}.txt', 'w') as report_file, redirect_stdout(report_file):
        profiler.print_stats()
    profiler.dump_stats(f'{report_basename}.lprof')

    # then
    assert elapsed <= expected_execution_time, \
        f"Expected duration: {expected_execution_time}, Actual duration: {elapsed}. " \
        f"Slow down by: {abs(elapsed - expected_execution_time)} seconds. " \
        f"We have crossed the benchmark limit after a speed up via commit {commit_id}."
 def decorator(*args, **kwargs):
     """Run the enclosing ``func`` under LineProfiler and dump per-line stats.

     Stats are written to ``/var/log/<unix-time>.lprof`` even when ``func``
     raises. ``func`` comes from the enclosing decorator scope.
     """
     # line-profiler==3.0.2
     profiler = LineProfiler()
     try:
         profiler.add_function(func)
         profiler.enable_by_count()
         return func(*args, **kwargs)
     finally:
         # always persist stats; filename is the current unix timestamp
         profiler.dump_stats('/var/log/{}.lprof'.format(time.time()))
Example #5
0
def profileTimeLine(func, args, kw_args):
    """Execute ``func(*args, **kw_args)`` under line_profiler.

    Binary stats are dumped to 'lpstats'; a human-readable per-line report
    is written to 'lineprofstats.txt'.
    """
    from line_profiler import LineProfiler

    profiler = LineProfiler(func)
    # from . import util; profiler.add_function(util.loadYml) # you may add more functions
    profiler.enable()
    func(*args, **kw_args)
    profiler.disable()
    profiler.dump_stats('lpstats')
    with open('lineprofstats.txt', 'w') as report:
        profiler.print_stats(report)
 def wrapper(*args, **kwargs):
     """Call the enclosing ``fn`` under LineProfiler and report line timings.

     Also registers every function in the enclosing ``follow`` iterable with
     the profiler. Stats are printed (output_unit=1, i.e. seconds) and dumped
     to the enclosing ``report`` path even if ``fn`` raises.
     """
     profiler = LineProfiler()
     profiler.add_function(fn)
     try:
         for f in follow:
             profiler.add_function(f)
         profiler.enable_by_count()
         return fn(*args, **kwargs)
     finally:
         # output_unit=1 -> timings reported in whole seconds
         profiler.print_stats(output_unit=1)
         profiler.dump_stats(report)
Example #7
0
 def wrapper(*args, **kwargs):
     """Profile the enclosing ``func`` with LineProfiler, with two guards.

     Profiling is skipped once the call count exceeds the enclosing
     ``max_cnt`` (a negative ``max_cnt`` disables the limit) and for
     recursive calls (when the caller's code-object name equals ``func``'s
     name). Stats are dumped to the enclosing ``filename``.
     """
     # per-wrapper call counter stored as an attribute on the wrapper itself
     if not hasattr(wrapper, 'cnt'):
         wrapper.cnt = 1
     else:
         wrapper.cnt += 1
     # profile only while under the count limit and not re-entering via recursion
     if not (max_cnt >= 0 and wrapper.cnt > max_cnt) and not sys._getframe().f_back.f_code.co_name == func.__name__:
         prof = LineProfiler()
         try:
             return prof(func)(*args, **kwargs)
         finally:
             # dump stats even when func raises
             prof.dump_stats(filename)
     else:
         return func(*args, **kwargs)
Example #8
0
 def wrapper(*args, **kwargs):
     """Line-profile the enclosing ``func`` when DEBUG is on; call it
     directly (unprofiled) otherwise. Returns ``func``'s return value.
     """
     # Don't profile if debugging is off (PROD server mode)
     if DEBUG:
         # NOTE(review): logged at error level in both branches — presumably
         # to guarantee visibility regardless of log configuration; confirm.
         logger.error('Line Profiling (@profile_this_by_line) ' + func.__name__ + '() to ' + stats_filename + '.')
         profiler = LineProfiler()
         profiled_func = profiler(func)
         try:
             retval = profiled_func(*args, **kwargs)
         finally:
            # profiler.print_stats()
             # stats are dumped even when func raises
             profiler.dump_stats(stats_filename)
     else:
         logger.error('Line Profiling (@profile_this_by_line) attempted on ' + func.__name__ + '() while in production mode.  Profiling Bypassed.')
         retval = func(*args, **kwargs)
     return retval
Example #9
0
def main():
    """Profile and time every variant of the function under optimization.

    Three passes (Python 2 syntax):
      1. cProfile across all variants -> 'function_event.stats'
      2. line_profiler on the 'current' variant -> 'function_event.line_stats'
      3. timeit (number=7) per variant, printed for coarse comparison.

    NOTE(review): the step names suggest successive optimization attempts of
    the same method — confirm against function_runner's implementation.
    """
    profiler = cProfile.Profile()

    # pass 1: function-level profile of every variant
    profiler.enable()
    function_runner('original_method')
    function_runner('step_one')
    function_runner('step_two')
    function_runner('step_three')
    function_runner('step_four')
    function_runner('step_five')
    function_runner('step_six')
    function_runner('step_seven')
    function_runner('step_eight')
    function_runner('step_nine')
    function_runner('current')
    profiler.disable()

    profiler.dump_stats('function_event.stats')
    # pass 2: line-by-line profile of just the 'current' implementation
    line_profiler = LineProfiler(CurrentFunctionContainer().current)
    line_profiler.enable()
    function_runner('current')
    line_profiler.disable()
    line_profiler.dump_stats('function_event.line_stats')
    line_profiler.print_stats()

    # pass 3: wall-clock comparison of all variants (7 runs each)
    print 'Original', timeit.timeit(
        lambda: function_runner('original_method'), number=7)
    print 'One', timeit.timeit(
        lambda: function_runner('step_one'), number=7)
    print 'Two', timeit.timeit(
        lambda: function_runner('step_two'), number=7)
    print 'Three', timeit.timeit(
        lambda: function_runner('step_three'), number=7)
    print 'Four', timeit.timeit(
        lambda: function_runner('step_four'), number=7)
    print 'Five', timeit.timeit(
        lambda: function_runner('step_five'), number=7)
    print 'Six', timeit.timeit(
        lambda: function_runner('step_six'), number=7)
    print 'Seven', timeit.timeit(
        lambda: function_runner('step_seven'), number=7)
    print 'Eight', timeit.timeit(
        lambda: function_runner('step_eight'), number=7)
    print 'Nine', timeit.timeit(
        lambda: function_runner('step_nine'), number=7)
    print 'Current', timeit.timeit(
        lambda: function_runner('current'), number=7)
# Test and profile TimeHistogram
#
# Copyright (C) 2010-2012 Huang Xin
#
# See LICENSE.TXT that came with this file.
#import cProfile,pstats
from line_profiler import LineProfiler
import TimeHistogram


def run():
    """Load the sample plexon recording and compute its PSTH average."""
    data_path = '/home/chrox/dev/plexon_data/c04-stim-timing-8ms-rand-1.plx'
    TimeHistogram.PSTHAverage(data_path).get_data()


if __name__ == '__main__':

    #cProfile.run('psth.get_data()','hist_profile')
    #p = pstats.Stats('hist_profile')
    #p.sort_stats('cumulative')
    #p.print_stats()

    # Line-profile run() plus the PSTHAverage._process_unit hot spot,
    # print the per-line report to stdout, and save the binary stats.
    profile = LineProfiler()
    profile.add_function(run)
    profile.add_function(TimeHistogram.PSTHAverage._process_unit)
    profile.run('run()')
    profile.print_stats()
    profile.dump_stats("hist_profile.lprof")
Example #11
0
        # NOTE(review): fragment begins mid-loop — the enclosing def and loop
        # headers are not visible here. It appends per-object region
        # measurements to properties[i]; presumably the append order must stay
        # in sync with the returned prop_names — confirm in the full function.
        properties[i].append(objects[j].moments_hu())
        properties[i].append(objects[j].image())
        properties[i].append(objects[j].label)
        properties[i].append(objects[j].major_axis_length())
        properties[i].append(objects[j].max_intensity())
        properties[i].append(objects[j].mean_intensity())
        properties[i].append(objects[j].min_intensity())
        properties[i].append(objects[j].minor_axis_length())
        properties[i].append(objects[j].moments())
        properties[i].append(objects[j].moments_normalized())
        properties[i].append(objects[j].orientation())
        properties[i].append(objects[j].perimeter())
        properties[i].append(objects[j].solidity())
        properties[i].append(objects[j].weighted_moments_central())
        properties[i].append(objects[j].weighted_centroid())
        properties[i].append(objects[j].weighted_moments_hu())
        properties[i].append(objects[j].weighted_moments())
        properties[i].append(objects[j].weighted_moments_normalized())
    return properties, prop_names


if __name__ == '__main__':
    # Load the test image, take one channel, and line-profile the
    # feature-extraction entry point via object_features.
    image = io.imread('test-image.png')
    green = image[..., 1].copy()  # channel index 1 — presumably green; confirm image layout
    lp = LineProfiler()
    lp.add_function(object_features)
    lp.run('intensity_object_features(green, 100)')
    lp.print_stats()
    lp.dump_stats('profile.lprof')
    print(__file__)
Example #12
0
        # NOTE(review): fragment begins mid-function (Python 2 print syntax);
        # the enclosing def is not visible. Fetches unstrobed-word events and
        # prints roughly 10 evenly spaced samples.
        unstrobed_word = pu.GetExtEvents(data,
                                         event='unstrobed_word',
                                         online=False)
        print "found %d unstrobed word events in which 10 events are:" % (len(
            unstrobed_word['value']))
        # every (len/10)-th index; integer division under Python 2
        indices = np.arange(0, len(unstrobed_word['value']),
                            len(unstrobed_word['value']) / 10)
        for value, timestamp in zip(unstrobed_word['value'][indices],
                                    unstrobed_word['timestamp'][indices]):
            binary_value = bin(value)
            print "unstrobed word:%s t=%f" % (binary_value, timestamp)


if __name__ == "__main__":
    #run()
    # Line-profile run() and the Plexon event-parsing helpers, then print
    # the per-line report and persist the binary stats.
    profile = LineProfiler()
    profile.add_function(run)
    profile.add_function(PlexUtil.GetExtEvents)
    profile.add_function(reconstruct_word)
    profile.run('run()')
    profile.print_stats()
    profile.dump_stats("testPlexFile_profile.lprof")

    #cProfile.run('run()','PlexFile_profile')
    #p = pstats.Stats('testPlexFile_profile.lprof')
    #p.sort_stats('cumulative')
    #p.print_stats()

    #print h.heap()
Example #13
0
        # NOTE(review): fragment begins mid-function (Python 2 print syntax);
        # prints the last 5 unstrobed bit-2/bit-3 events, then ~10 evenly
        # spaced unstrobed-word events.
        for timestamp in bit_2_events[-5:]:
            print "unstrobed bit 2 t=%f" % timestamp
        print "found %d bit 3 events. Last 5 events are:" %(len(bit_3_events))
        for timestamp in bit_3_events[-5:]:
            print "unstrobed bit 3 t=%f" % timestamp

        unstrobed_word = pu.GetExtEvents(data, event='unstrobed_word', online=False)
        print "found %d unstrobed word events in which 10 events are:" %(len(unstrobed_word['value']))
        # every (len/10)-th index; integer division under Python 2
        indices = np.arange(0,len(unstrobed_word['value']),len(unstrobed_word['value'])/10)
        for value,timestamp in zip(unstrobed_word['value'][indices],unstrobed_word['timestamp'][indices]) :
            binary_value = bin(value)
            print "unstrobed word:%s t=%f" % (binary_value,timestamp)

if __name__ == "__main__":
        #run()
        # Line-profile run() and the Plexon event helpers; print the per-line
        # report and persist the binary stats.
        profile = LineProfiler()
        profile.add_function(run)
        profile.add_function(PlexUtil.GetExtEvents)
        profile.add_function(reconstruct_word)
        profile.run('run()')
        profile.print_stats()
        profile.dump_stats("testPlexFile_profile.lprof")

        #cProfile.run('run()','PlexFile_profile')
        #p = pstats.Stats('testPlexFile_profile.lprof')
        #p.sort_stats('cumulative')
        #p.print_stats()

        #print h.heap()

# Test and profile TimeHistogram
#
# Copyright (C) 2010-2012 Huang Xin
# 
# See LICENSE.TXT that came with this file.
#import cProfile,pstats
from line_profiler import LineProfiler
import TimeHistogram

def run():
    """Build a PSTH average from the sample plexon recording and fetch its data."""
    plx_path = '/home/chrox/dev/plexon_data/c04-stim-timing-8ms-rand-1.plx'
    TimeHistogram.PSTHAverage(plx_path).get_data()

if __name__ == '__main__':

    #cProfile.run('psth.get_data()','hist_profile')
    #p = pstats.Stats('hist_profile')
    #p.sort_stats('cumulative')
    #p.print_stats()

    # Line-profile run() plus the PSTHAverage._process_unit hot spot,
    # print the per-line report to stdout, and save the binary stats.
    profile = LineProfiler()
    profile.add_function(run)
    profile.add_function(TimeHistogram.PSTHAverage._process_unit)
    profile.run('run()')
    profile.print_stats()
    profile.dump_stats("hist_profile.lprof")
Example #15
0
# -*- coding: utf-8 -*-
# Author Kylin
# Python Version 3.7.3
# OS macOS

import line_profiler
from line_profiler import LineProfiler
import random


def do_something(numbers):
    """Demo workload for line profiling: sum the values and scale each by 1/43.

    Results are intentionally discarded; returns None.
    """
    total = sum(numbers)
    scaled = [value / 43 for value in numbers]


# Demo script: profile do_something line by line, then save, reload and
# re-display the collected stats.
numbers = [random.randint(1, 100) for i in range(1000)]
lp = LineProfiler()  # instantiate the profiler
lp_wrapper = lp(do_something)  # wrap the function to be profiled
lp_wrapper(numbers)  # call it with the input data
lp.print_stats()  # show per-line timings for each stage

# Save
# dump the profile results to test.lp
lp.dump_stats("test.lp")

# Reload
# load the binary stats file back
lstats = line_profiler.load_stats("test.lp")
# print the timings to standard output
line_profiler.show_text(lstats.timings, lstats.unit)