Code Example #1
File: allPythonContent.py Project: Mondego/pyreco
class ProfilerMiddleware(object):
    def __init__(self):
        self.profiler = None

    def process_request(self, request):
        if getattr(settings, 'PYINSTRUMENT_URL_ARGUMENT', 'profile') in request.GET:
            self.profiler = Profiler()
            try:
                self.profiler.start()
            except NotMainThreadError:
                # Clear the profiler before re-raising; the original assignment
                # placed after the raise was unreachable.
                self.profiler = None
                raise NotMainThreadError(not_main_thread_message)


    def process_response(self, request, response):
        if self.profiler:
            try:
                self.profiler.stop()

                return HttpResponse(self.profiler.output_html())
            except NotMainThreadError:
                raise NotMainThreadError(not_main_thread_message)
            finally:
                self.profiler = None
        else:
            return response
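To activate a middleware like this one, it must be listed in the Django settings; a minimal sketch, assuming the class is importable as pyinstrument.middleware.ProfilerMiddleware:

# settings.py -- a sketch; adjust the import path to wherever the class lives
MIDDLEWARE_CLASSES = (
    'pyinstrument.middleware.ProfilerMiddleware',
    # ... the rest of your middleware ...
)

With the middleware installed, requesting any URL with ?profile appended returns the HTML profile report in place of the normal response.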
Code Example #2
File: middleware.py Project: asmeurer/pyinstrument
    def process_request(self, request):
        profile_dir = getattr(settings, 'PYINSTRUMENT_PROFILE_DIR', None)

        if getattr(settings, 'PYINSTRUMENT_URL_ARGUMENT', 'profile') in request.GET or profile_dir:
            profiler = Profiler()
            profiler.start()

            request.profiler = profiler
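The matching process_response is not included in this excerpt; a minimal sketch of the counterpart, assuming the request.profiler attribute set above (a real implementation would also honour PYINSTRUMENT_PROFILE_DIR by writing the report to a file):

    def process_response(self, request, response):
        # sketch -- assumes django.http.HttpResponse is imported, as in Code Example #1
        profiler = getattr(request, 'profiler', None)
        if profiler is None:
            return response
        profiler.stop()
        return HttpResponse(profiler.output_html())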
Code Example #3
    def process_request(self, request):
        profile_dir = getattr(settings, 'PYINSTRUMENT_PROFILE_DIR', None)
        use_signal = getattr(settings, 'PYINSTRUMENT_USE_SIGNAL', True)

        if getattr(settings, 'PYINSTRUMENT_URL_ARGUMENT', 'profile') in request.GET or profile_dir:
            profiler = Profiler(use_signal=use_signal)
            try:
                profiler.start()
                request.profiler = profiler
            except NotMainThreadError:
                raise NotMainThreadError(not_main_thread_message)
Code Example #4
File: debug.py Project: 0xcd03/inbox
def attach_profiler():
    profiler = Profiler()
    profiler.start()

    def handle_signal(signum, frame):
        print(profiler.output_text(color=True))
        # Work around an arguable bug in pyinstrument in which output gets
        # frozen after the first call to profiler.output_text()
        delattr(profiler, "_root_frame")

    signal.signal(signal.SIGTRAP, handle_signal)
Code Example #5
File: debug.py Project: DrMoriarty/sync-engine
def attach_pyinstrument_profiler():
    """Run the pyinstrument profiler in the background and dump its output to
    stdout when the process receives SIGTRAP. In general, you probably want to
    use the facilities in inbox.util.profiling instead."""
    profiler = Profiler()
    profiler.start()

    def handle_signal(signum, frame):
        print(profiler.output_text(color=True))
        # Work around an arguable bug in pyinstrument in which output gets
        # frozen after the first call to profiler.output_text()
        delattr(profiler, '_root_frame')

    signal.signal(signal.SIGTRAP, handle_signal)
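Either helper is exercised by delivering SIGTRAP to the process (Unix only); for example, from within the same program:

import os
import signal
import time

attach_pyinstrument_profiler()        # installs the SIGTRAP handler shown above
time.sleep(1)                         # give the profiler time to collect samples
os.kill(os.getpid(), signal.SIGTRAP)  # prints the profile to stdout

From a shell, kill -TRAP <pid> achieves the same thing.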
Code Example #6
File: __main__.py Project: mcfletch/pyinstrument
def main():
    usage = "usage: %prog [-h] [[-o output_file_path] scriptfile [arg] ...] | [ -i infile ]"
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False
    parser.add_option('', '--html',
        dest="output_html", action='store_true',
        help="output HTML instead of text", default=False)
    parser.add_option('', '--json',
        dest="output_json", action='store_true',
        help="output raw JSON dump instead of text or HTML", default=False)
    parser.add_option('-o', '--outfile',
        dest="outfile", action='store', 
        help="save stats to <outfile>", default=None)
    parser.add_option('-i', '--infile',
        dest="infile", action='store', 
        help="load stats from JSON file <infile>", default=None)

    if not sys.argv[1:]:
        parser.print_usage()
        sys.exit(2)

    (options, args) = parser.parse_args()
    sys.argv[:] = args

    if len(args) > 0:
        progname = args[0]
        sys.path.insert(0, os.path.dirname(progname))

        with open(progname, 'rb') as fp:
            code = compile(fp.read(), progname, 'exec')
        globs = {
            '__file__': progname,
            '__name__': '__main__',
            '__package__': None,
        }

        profiler = Profiler()
        profiler.start()

        try:
            exec(code, globs, None)
        except (SystemExit, KeyboardInterrupt):
            pass

        profiler.stop()
        
        write_output(options, profiler)
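write_output is not part of this excerpt; judging from the inline logic in Code Example #10 below, it presumably resembles this sketch (the --json path is omitted because the corresponding profiler API is not shown in these excerpts):

def write_output(options, profiler):
    if options.outfile:
        f = codecs.open(options.outfile, 'w', 'utf-8')
        unicode, color = True, False
    else:
        f = sys.stdout
        unicode = stdout_supports_unicode()
        color = stdout_supports_color()

    if options.output_html:
        f.write(profiler.output_html())
    else:
        f.write(profiler.output_text(unicode=unicode, color=color))

    f.close()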
Code Example #7
File: middleware.py Project: zer0def/pyinstrument
    def process_request(self, request):
        profile_dir = getattr(settings, 'PYINSTRUMENT_PROFILE_DIR', None)
        use_signal = getattr(settings, 'PYINSTRUMENT_USE_SIGNAL', True)
        collect_args = getattr(settings, 'PYINSTRUMENT_COLLECT_ARGS', False)

        profiler = None
        if getattr(settings, 'PYINSTRUMENT_URL_COLLECT_ARGS_ARGUMENT', 'profile_collect_args') in request.GET:
            profiler = Profiler(use_signal=use_signal, collect_args=True)
        elif getattr(settings, 'PYINSTRUMENT_URL_ARGUMENT', 'profile') in request.GET or profile_dir:
            profiler = Profiler(use_signal=use_signal, collect_args=collect_args)

        if profiler:
            try:
                profiler.start()
                request.profiler = profiler
            except NotMainThreadError:
                raise NotMainThreadError(not_main_thread_message)
Code Example #8
File: allPythonContent.py Project: Mondego/pyreco
 def process_request(self, request):
     if getattr(settings, 'PYINSTRUMENT_URL_ARGUMENT', 'profile') in request.GET:
         self.profiler = Profiler()
         try:
             self.profiler.start()
         except NotMainThreadError:
             self.profiler = None
             raise NotMainThreadError(not_main_thread_message)
Code Example #9
File: debug.py Project: 0xcd03/inbox
 def wrapper(*args, **kwargs):
     profiler = Profiler()
     profiler.start()
     r = func(*args, **kwargs)
     profiler.stop()
     print(profiler.output_text(color=True))
     return r
Code Example #10
File: allPythonContent.py Project: Mondego/pyreco
def main():
    usage = "usage: %prog [-h] [-o output_file_path] scriptfile [arg] ..."
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False
    parser.add_option('', '--html',
        dest="output_html", action='store_true',
        help="output HTML instead of text", default=False)
    parser.add_option('-o', '--outfile',
        dest="outfile", action='store', 
        help="save stats to <outfile>", default=None)

    if not sys.argv[1:]:
        parser.print_usage()
        sys.exit(2)

    (options, args) = parser.parse_args()
    sys.argv[:] = args

    if len(args) > 0:
        progname = args[0]
        sys.path.insert(0, os.path.dirname(progname))

        with open(progname, 'rb') as fp:
            code = compile(fp.read(), progname, 'exec')
        globs = {
            '__file__': progname,
            '__name__': '__main__',
            '__package__': None,
        }

        profiler = Profiler()
        profiler.start()

        try:
            exec(code, globs, None)
        except (SystemExit, KeyboardInterrupt):
            pass

        profiler.stop()

        if options.outfile:
            f = codecs.open(options.outfile, 'w', 'utf-8')
            unicode = True
            color = False
        else:
            f = sys.stdout
            unicode = stdout_supports_unicode()
            color = stdout_supports_color()

        if options.output_html:
            f.write(profiler.output_html())
        else:
            f.write(profiler.output_text(unicode=unicode, color=color))

        f.close()
Code Example #11
def run_profiling(args):
    lprofiler = LineProfiler() 

    monitor_functions = [api.problem.submit_key, api.problem.get_unlocked_pids, api.problem.get_solved_pids,
                        api.problem.get_all_problems, api.problem.get_solved_problems, api.stats.get_score,
                        api.cache.memoize, api.autogen.grade_problem_instance, api.autogen.get_problem_instance,
                        api.autogen.get_number_of_instances]

    for func in monitor_functions:
        lprofiler.add_function(func)

    lprofiler.enable()

    if args.stack:
        profiler = Profiler(use_signal=False)
        profiler.start()

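    # `operations` is assumed to be a module-level list of (func, args, kwargs)
    # tuples defined elsewhere in this script; it is not part of this excerpt.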
    for func, a, kw in operations:
        func(*a, **kw)

    if args.stack:
        profiler.stop()

    lprofiler.disable()

    if args.print:
        print(profiler.output_text(unicode=True, color=True))
        lprofiler.print_stats()

    output = open(args.output, "w")

    if args.stack:
        output.write(profiler.output_text(unicode=True))

        if args.output_html is not None:
            output_html = open(args.output_html, "w")
            output_html.write(profiler.output_html())
            output_html.close()
            print("Wrote test info to " + args.output_html)

    lprofiler.print_stats(output)
    output.close()
    print("Wrote test info to " + args.output)
Code Example #12
def test_profiler_retains_multiple_calls():
    profiler = Profiler()
    profiler.start()

    long_function_a()
    long_function_b()
    long_function_a()
    long_function_b()

    profiler.stop()

    print(profiler.output_text())

    frame = profiler.last_session.root_frame()
    assert frame.function == 'test_profiler_retains_multiple_calls'
    assert len(frame.children) == 4
Code Example #13
File: tests.py Project: bhallapoorva/edx-ora2
    def setUp(self, problem_type, staff=False):
        """
        Configure page objects to test Open Assessment.

        Args:
            problem_type (str): The type of problem being tested,
              used to choose which part of the course to load.
            staff (bool): If True, runs the test with a staff user (defaults to False).

        """
        super(OpenAssessmentTest, self).setUp()

        if PROFILING_ENABLED:
            self.profiler = Profiler(use_signal=False)
            self.profiler.start()

        self.problem_loc = self.PROBLEM_LOCATIONS[problem_type]
        self.auto_auth_page = AutoAuthPage(self.browser, course_id=self.TEST_COURSE_ID, staff=staff)
        self.submission_page = SubmissionPage(self.browser, self.problem_loc)
        self.self_asmnt_page = AssessmentPage('self-assessment', self.browser, self.problem_loc)
        self.peer_asmnt_page = AssessmentPage('peer-assessment', self.browser, self.problem_loc)
        self.student_training_page = AssessmentPage('student-training', self.browser, self.problem_loc)
        self.staff_asmnt_page = AssessmentPage('staff-assessment', self.browser, self.problem_loc)
        self.grade_page = GradePage(self.browser, self.problem_loc)
Code Example #14
def test_collapses_multiple_calls_by_default():
    profiler = Profiler()
    profiler.start()

    long_function_a()
    long_function_b()
    long_function_a()
    long_function_b()

    profiler.stop()

    text_output = profiler.output_text()

    # output should be something like:
    # 1.513 test_collapses_multiple_calls_by_default  test/test_profiler.py:25
    # |- 0.507 long_function_a  test/test_profiler.py:17
    # |- 0.503 long_function_b  test/test_profiler.py:20

    assert text_output.count('test_collapses_multiple_calls_by_default') == 1
    assert text_output.count('long_function_a') == 1
    assert text_output.count('long_function_b') == 1
Code Example #15
def test_two_functions():
    profiler = Profiler()
    profiler.start()

    long_function_a()
    long_function_b()

    profiler.stop()

    print(profiler.output_text())

    frame = profiler.last_session.root_frame()

    assert frame.function == 'test_two_functions'
    assert len(frame.children) == 2

    frame_b, frame_a = sorted(frame.children, key=lambda f: f.time(), reverse=True)

    assert frame_a.function == 'long_function_a'
    assert frame_b.function == 'long_function_b'
    assert 0.2 < frame_a.time() < 0.3
    assert 0.45 < frame_b.time() < 0.55
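The helpers long_function_a and long_function_b are defined elsewhere in the test module; a plausible minimal definition consistent with the timing assertions above (a sketch, not the project's actual code):

import time

def long_function_a():
    time.sleep(0.25)  # falls inside the asserted 0.2-0.3 s window

def long_function_b():
    time.sleep(0.5)   # falls inside the asserted 0.45-0.55 s window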
Code Example #16
import gym
import cv2
from gym.wrappers import Monitor
import gym_desktop
import time
import pickle
from pyinstrument import Profiler

# loads a record of actions (from utils/replay.py) and performs them in the environment

profiler = Profiler()
profiler.start()

env = gym.make('Desktop-v0', debug=True, show=True)
max_ep = 10
actions = []

with open('listfile.data', 'rb') as filehandle:
    actions = pickle.load(filehandle)

# Run Environment
step_cnt = 0
ep_reward = 0
done = False
state = env.reset()

while not done:
    if step_cnt >= len(actions):
        done = True
        break
    next_state, reward, done, _ = env.step([actions[step_cnt]])
    # The excerpt ends here; presumably the loop also advances through the
    # recorded actions and accumulates reward, roughly:
    ep_reward += reward
    step_cnt += 1
Code Example #17
def main():
    usage = ("usage: pyinstrument [options] scriptfile [arg] ...")
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False

    parser.add_option('', '--setprofile',
        dest='setprofile', action='store_true',
        help='run in setprofile mode, instead of signal mode', default=False)

    parser.add_option('', '--html',
        dest="output_html", action='store_true',
        help="output HTML instead of text", default=False)
    parser.add_option('', '--flame',
        dest='output_flame', action='store_true',
        help='output an HTML flame chart', default=False)
    parser.add_option('-r', '--renderer',
        dest='output_renderer', action='store', type='string',
        help='python import path to a renderer class', default=None)

    parser.add_option('-o', '--outfile',
        dest="outfile", action='store',
        help="save report to <outfile>", default=None)

    parser.add_option('', '--unicode',
        dest='unicode', action='store_true',
        help='force unicode text output')
    parser.add_option('', '--no-unicode',
        dest='unicode', action='store_false',
        help='force ascii text output')

    parser.add_option('', '--color',
        dest='color', action='store_true',
        help='force ansi color text output')
    parser.add_option('', '--no-color',
        dest='color', action='store_false',
        help='force no color text output')

    parser.add_option('-m', '',
        dest='module_name', action='store',
        help='searches sys.path for the named module and runs the corresponding .py file as a script.')

    if not sys.argv[1:]:
        parser.print_help()
        sys.exit(2)

    options, args = parser.parse_args()

    if args == [] and options.module_name is None:
        parser.print_help()
        sys.exit(2)

    if options.module_name is not None:
        sys.argv[:] = [options.module_name] + args
        code = "run_module(modname, run_name='__main__')"
        globs = {
            'run_module': runpy.run_module,
            'modname': options.module_name
        }
    else:
        sys.argv[:] = args
        progname = args[0]
        sys.path.insert(0, os.path.dirname(progname))
        with open(progname, 'rb') as fp:
            code = compile(fp.read(), progname, 'exec')
        globs = {
            '__file__': progname,
            '__name__': '__main__',
            '__package__': None,
        }

    if options.output_renderer:
        renderer = options.output_renderer
    elif options.output_html:
        renderer = 'html'
    else:
        renderer = 'text'

    recorder = get_renderer_class(renderer).preferred_recorder

    profiler = Profiler(recorder=recorder)

    profiler.start()

    try:
        exec_(code, globs, None)
    except (SystemExit, KeyboardInterrupt):
        pass

    profiler.stop()

    if options.outfile:
        f = codecs.open(options.outfile, 'w', 'utf-8')
    else:
        f = sys.stdout

    renderer_kwargs = {}

    if renderer == 'text':
        unicode_override = options.unicode is not None
        color_override = options.color is not None
        unicode = options.unicode if unicode_override else file_supports_unicode(f)
        color = options.color if color_override else file_supports_color(f)
        
        renderer_kwargs = {'unicode': unicode, 'color': color}

    f.write(profiler.output(renderer=renderer, **renderer_kwargs))
    f.close()
Code Example #18
File: run_clustering.py Project: IlyaGusev/purano
    clusterer.save(output_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--input-file",
                        type=str,
                        default="output/train_annotated.db")
    parser.add_argument("--nrows", type=int, default=None)
    parser.add_argument("--sort-by-date", default=False, action='store_true')
    parser.add_argument("--start-date", type=str, default=None)
    parser.add_argument("--end-date", type=str, default=None)
    parser.add_argument("--config", type=str, required=True)
    parser.add_argument("--output-file",
                        type=str,
                        default="output/clusters.json")
    parser.add_argument("--performance-log",
                        type=str,
                        default="clustering_performance.txt")

    args = parser.parse_args()
    profiler = Profiler()
    profiler.start()
    args = vars(args)
    performance_log = args.pop("performance_log")
    cluster(**args)
    profiler.stop()

    with open(performance_log, "w") as w:
        w.write(profiler.output_text(unicode=True, color=True, show_all=True))
Code Example #19
File: profile.py Project: pyamg/pyamg-examples
from pyinstrument import Profiler

import numpy as np
import pyamg

profiler = Profiler()
profiler.start()

n = int(1e3)
A = pyamg.gallery.poisson((n, n), format='csr')
b = np.random.rand(A.shape[0])

ml = pyamg.smoothed_aggregation_solver(A, max_coarse=10)
res = []
x = ml.solve(b, accel='cg', residuals=res)
print(len(res))

profiler.stop()

print(profiler.output_text(unicode=True, color=True))
Code Example #20
File: cosumnes_2020.py Project: ceff-tech/belleflopt
from belleflopt import support

from pyinstrument import Profiler

profiler = Profiler()
profiler.start()

support.run_optimize_new(NFE=50, popsize=10, use_comet=False)

profiler.stop()

print(profiler.output_text(unicode=False, color=False))

Code Example #21
File: __main__.py Project: joerick/pyinstrument
def main():
    usage = ("usage: pyinstrument [options] scriptfile [arg] ...")
    version_string = 'pyinstrument {v}, on Python {pyv[0]}.{pyv[1]}.{pyv[2]}'.format(
        v=pyinstrument.__version__,
        pyv=sys.version_info,
    )
    parser = optparse.OptionParser(usage=usage, version=version_string)
    parser.allow_interspersed_args = False

    def dash_m_callback(option, opt, value, parser):
        parser.values.module_name = value
        # everything after the -m argument should be passed to that module
        parser.values.module_args = parser.rargs + parser.largs
        parser.rargs[:] = []
        parser.largs[:] = []

    parser.add_option('', '--load-prev',
        dest='load_prev', action='store', metavar='ID',
        help="Instead of running a script, load a previous report")

    parser.add_option('-m', '',
        dest='module_name', action='callback', callback=dash_m_callback,
        type="str",
        help="run library module as a script, like 'python -m module'")

    parser.add_option('-o', '--outfile',
        dest="outfile", action='store',
        help="save to <outfile>", default=None)

    parser.add_option('-r', '--renderer',
        dest='renderer', action='store', type='string',
        help=("how the report should be rendered. One of: 'text', 'html', 'json', or python "
              "import path to a renderer class"),
        default='text')

    parser.add_option('', '--html',
        dest="output_html", action='store_true',
        help=optparse.SUPPRESS_HELP, default=False)  # deprecated shortcut for --renderer=html

    parser.add_option('-t', '--timeline',
        dest='timeline', action='store_true',
        help="render as a timeline - preserve ordering and don't condense repeated calls")

    parser.add_option('', '--hide',
        dest='hide_fnmatch', action='store', metavar='EXPR',
        help=("glob-style pattern matching the file paths whose frames to hide. Defaults to "
              "'*{sep}lib{sep}*'.").format(sep=os.sep),
        default='*{sep}lib{sep}*'.format(sep=os.sep))
    parser.add_option('', '--hide-regex',
        dest='hide_regex', action='store', metavar='REGEX',
        help=("regex matching the file paths whose frames to hide. Useful if --hide doesn't give "
              "enough control."))

    parser.add_option('', '--show',
        dest='show_fnmatch', action='store', metavar='EXPR',
        help=("glob-style pattern matching the file paths whose frames to "
              "show, regardless of --hide or --hide-regex. For example, use "
              "--show '*/<library>/*' to show frames within a library that "
              "would otherwise be hidden."))
    parser.add_option('', '--show-regex',
        dest='show_regex', action='store', metavar='REGEX',
        help=("regex matching the file paths whose frames to always show. "
              "Useful if --show doesn't give enough control."))
    parser.add_option('', '--show-all',
        dest='show_all', action='store_true',
        help="show everything", default=False)

    parser.add_option('', '--unicode',
        dest='unicode', action='store_true',
        help='(text renderer only) force unicode text output')
    parser.add_option('', '--no-unicode',
        dest='unicode', action='store_false',
        help='(text renderer only) force ascii text output')

    parser.add_option('', '--color',
        dest='color', action='store_true',
        help='(text renderer only) force ansi color text output')
    parser.add_option('', '--no-color',
        dest='color', action='store_false',
        help='(text renderer only) force no color text output')

    if not sys.argv[1:]:
        parser.print_help()
        sys.exit(2)

    options, args = parser.parse_args()

    if args == [] and options.module_name is None and options.load_prev is None:
        parser.print_help()
        sys.exit(2)

    if not options.hide_regex:
        options.hide_regex = fnmatch.translate(options.hide_fnmatch)
    
    if not options.show_regex and options.show_fnmatch:
        options.show_regex = fnmatch.translate(options.show_fnmatch)
     
    if options.show_all:
        options.show_regex = r'.*'

    if options.load_prev:
        session = load_report(options.load_prev)
    else:
        if options.module_name is not None:
            sys.argv[:] = [options.module_name] + options.module_args
            code = "run_module(modname, run_name='__main__')"
            globs = {
                'run_module': runpy.run_module,
                'modname': options.module_name
            }
        else:
            sys.argv[:] = args
            progname = args[0]
            sys.path.insert(0, os.path.dirname(progname))
            with open(progname, 'rb') as fp:
                code = compile(fp.read(), progname, 'exec')
            globs = {
                '__file__': progname,
                '__name__': '__main__',
                '__package__': None,
            }

        profiler = Profiler()

        profiler.start()

        try:
            exec_(code, globs, None)
        except (SystemExit, KeyboardInterrupt):
            pass

        profiler.stop()
        session = profiler.last_session

    if options.output_html:
        options.renderer = 'html'

    output_to_temp_file = (options.renderer == 'html'
                           and not options.outfile
                           and file_is_a_tty(sys.stdout))

    if options.outfile:
        f = codecs.open(options.outfile, 'w', 'utf-8')
        should_close_f_after_writing = True
    elif not output_to_temp_file:
        if PY2:
            f = codecs.getwriter('utf-8')(sys.stdout)
        else:
            f = sys.stdout
        should_close_f_after_writing = False

    renderer_kwargs = {'processor_options': {
        'hide_regex': options.hide_regex,
        'show_regex': options.show_regex,
    }}

    if options.timeline is not None:
        renderer_kwargs['timeline'] = options.timeline

    if options.renderer == 'text':
        unicode_override = options.unicode is not None
        color_override = options.color is not None
        unicode = options.unicode if unicode_override else file_supports_unicode(f)
        color = options.color if color_override else file_supports_color(f)
        
        renderer_kwargs.update({'unicode': unicode, 'color': color})

    renderer_class = get_renderer_class(options.renderer)
    renderer = renderer_class(**renderer_kwargs)

    # remove this frame from the trace
    renderer.processors.append(remove_first_pyinstrument_frame_processor)


    if output_to_temp_file:
        output_filename = renderer.open_in_browser(session)
        print('stdout is a terminal, so saved profile output to %s' % output_filename)
    else:
        f.write(renderer.render(session))
        if should_close_f_after_writing:
            f.close()

    if options.renderer == 'text':
        _, report_identifier = save_report(session)
        print('To view this report with different options, run:')
        print('    pyinstrument --load-prev %s [options]' % report_identifier)
        print('')
Code Example #22
File: tests.py Project: bhallapoorva/edx-ora2
class OpenAssessmentTest(WebAppTest):
    """
    UI-level acceptance tests for Open Assessment.
    """
    TEST_COURSE_ID = "course-v1:edx+ORA203+course"

    PROBLEM_LOCATIONS = {
        'staff_only':
            u'courses/{test_course_id}/courseware/'
            u'61944efb38a349edb140c762c7419b50/415c3ee1b7d04b58a1887a6fe82b31d6/'.format(test_course_id=TEST_COURSE_ID),
        'self_only':
            u'courses/{test_course_id}/courseware/'
            u'a4dfec19cf9b4a6fb5b18be6ccd9cecc/338a4affb58a45459629e0566291381e/'.format(test_course_id=TEST_COURSE_ID),
        'peer_only':
            u'courses/{test_course_id}/courseware/'
            u'a4dfec19cf9b4a6fb5b18be6ccd9cecc/417e47b2663a4f79b62dba20b21628c8/'.format(test_course_id=TEST_COURSE_ID),
        'student_training':
            u'courses/{test_course_id}/courseware/'
            u'676026889c884ac1827688750871c825/5663e9b038434636977a4226d668fe02/'.format(test_course_id=TEST_COURSE_ID),
        'file_upload':
            u'courses/{test_course_id}/courseware/'
            u'57a3f9d51d424f6cb922f0d69cba868d/bb563abc989340d8806920902f267ca3/'.format(test_course_id=TEST_COURSE_ID),
        'full_workflow_staff_override':
            u'courses/{test_course_id}/courseware/'
            u'676026889c884ac1827688750871c825/181ea9ff144c4766be44eb8cb360e34f/'.format(test_course_id=TEST_COURSE_ID),
        'full_workflow_staff_required':
            u'courses/{test_course_id}/courseware/'
            u'8d9584d242b44343bc270ea5ef04ab03/0b0dcc728abe45138c650732af178afb/'.format(test_course_id=TEST_COURSE_ID),
    }

    SUBMISSION = u"This is a test submission."
    LATEX_SUBMISSION = u"[mathjaxinline]( \int_{0}^{1}xdx )[/mathjaxinline]"
    OPTIONS_SELECTED = [1, 2]
    STAFF_OVERRIDE_OPTIONS_SELECTED = [0, 1]
    STAFF_OVERRIDE_SCORE = 1
    STAFF_GRADE_EXISTS = "COMPLETE"
    STAFF_OVERRIDE_LEARNER_STEPS_NOT_COMPLETE = "YOU MUST COMPLETE THE STEPS ABOVE TO VIEW YOUR GRADE"
    STAFF_AREA_SCORE = "Final grade: {} out of 8"
    STAFF_OVERRIDE_STAFF_AREA_NOT_COMPLETE = "The problem has not been completed."
    EXPECTED_SCORE = 6
    STUDENT_TRAINING_OPTIONS = [
        [1, 2],
        [0, 2]
    ]

    TEST_PASSWORD = "******"

    def setUp(self, problem_type, staff=False):
        """
        Configure page objects to test Open Assessment.

        Args:
            problem_type (str): The type of problem being tested,
              used to choose which part of the course to load.
            staff (bool): If True, runs the test with a staff user (defaults to False).

        """
        super(OpenAssessmentTest, self).setUp()

        if PROFILING_ENABLED:
            self.profiler = Profiler(use_signal=False)
            self.profiler.start()

        self.problem_loc = self.PROBLEM_LOCATIONS[problem_type]
        self.auto_auth_page = AutoAuthPage(self.browser, course_id=self.TEST_COURSE_ID, staff=staff)
        self.submission_page = SubmissionPage(self.browser, self.problem_loc)
        self.self_asmnt_page = AssessmentPage('self-assessment', self.browser, self.problem_loc)
        self.peer_asmnt_page = AssessmentPage('peer-assessment', self.browser, self.problem_loc)
        self.student_training_page = AssessmentPage('student-training', self.browser, self.problem_loc)
        self.staff_asmnt_page = AssessmentPage('staff-assessment', self.browser, self.problem_loc)
        self.grade_page = GradePage(self.browser, self.problem_loc)

    def log_to_file(self):
        with open('{}-profile.log'.format(self.id()), 'w') as f:
            f.write(self.profiler.output_text())

    def tearDown(self):
        if PROFILING_ENABLED:
            self.profiler.stop()
            self.log_to_file()

    def login_user(self, learner, email):
        """
        Logs in an already existing user.

        Args:
            learner (str): the username of the user.
            email (str): email address of the user.
        """
        auto_auth_page = AutoAuthPage(
            self.browser, email=email, password=self.TEST_PASSWORD, username=learner,
            course_id=self.TEST_COURSE_ID, staff=True
        )
        auto_auth_page.visit()

    def do_self_assessment(self):
        """
        Creates a user, submits a self assessment, verifies the grade, and returns the username of the
        learner for which the self assessment was submitted.
        """
        self.auto_auth_page.visit()
        username, _ = self.auto_auth_page.get_username_and_email()
        self.submission_page.visit().submit_response(self.SUBMISSION)
        self.assertTrue(self.submission_page.has_submitted)

        # Submit a self-assessment
        self.submit_self_assessment(self.OPTIONS_SELECTED)

        # Verify the grade
        self.assertEqual(self.EXPECTED_SCORE, self.grade_page.wait_for_page().score)

        return username

    def submit_self_assessment(self, options=OPTIONS_SELECTED):
        """
        Submit a self assessment for the currently logged in student. Do not verify grade.

        Args:
            options: the options to select for the self assessment
                (will use OPTIONS_SELECTED if not specified)
        """
        self.self_asmnt_page.wait_for_page().wait_for_response()
        self.assertIn(self.SUBMISSION, self.self_asmnt_page.response_text)
        self.self_asmnt_page.assess("self", options).wait_for_complete()
        self.assertTrue(self.self_asmnt_page.is_complete)

    def _verify_staff_grade_section(self, expected_status, expected_message_title):
        """
        Verifies the expected status and message text in the Staff Grade section
        (as shown to the learner).
        """
        self.staff_asmnt_page.wait_for_page()
        self.assertEqual("Staff Grade", self.staff_asmnt_page.label)
        self.staff_asmnt_page.verify_status_value(expected_status)
        self.assertEqual(expected_message_title, self.staff_asmnt_page.message_title)

    def do_training(self):
        """
        Complete two training examples, satisfying the requirements.
        """
        for example_num, options_selected in enumerate(self.STUDENT_TRAINING_OPTIONS):
            if example_num > 0:
                try:
                    self.student_training_page.wait_for_num_completed(example_num)
                except BrokenPromise:
                    msg = "Did not complete at least {num} student training example(s).".format(num=example_num)
                    self.fail(msg)

            self.student_training_page.wait_for_page().wait_for_response().assess("training", options_selected)

        # Check that we've completed student training
        try:
            self.student_training_page.wait_for_complete()
        except BrokenPromise:
            self.fail("Student training was not marked complete.")

    def do_peer_assessment(self, count=1, options=OPTIONS_SELECTED):
        """
        Does the specified number of peer assessments.

        Args:
            count: the number of assessments that must be completed (defaults to 1)
            options: the options to use (defaults to OPTIONS_SELECTED)
        """
        self.peer_asmnt_page.visit()

        for count_assessed in range(1, count + 1):
            self.peer_asmnt_page.wait_for_page().wait_for_response().assess("peer", options)
            self.peer_asmnt_page.wait_for_num_completed(count_assessed)

    def do_staff_override(self, username, final_score=STAFF_AREA_SCORE.format(STAFF_OVERRIDE_SCORE)):
        """
        Complete a staff assessment (grade override).

        Args:
            username: the learner to grade
            final_score: the expected final score as shown in the staff area
                (defaults to the staff override score value)
        """
        self.staff_area_page.visit()
        self.staff_area_page.show_learner(username)
        self.staff_area_page.expand_learner_report_sections()
        self.staff_area_page.staff_assess(self.STAFF_OVERRIDE_OPTIONS_SELECTED, "override")
        self.staff_area_page.verify_learner_final_score(final_score)

    def do_staff_assessment(self, number_to_assess=0, options_selected=OPTIONS_SELECTED):
        """
        Use staff tools to assess available responses.

        Args:
            number_to_assess: the number of submissions to assess. If not provided (or 0),
                will grade all available submissions.
        """
        self.staff_area_page.visit()
        self.staff_area_page.click_staff_toolbar_button("staff-grading")
        # Get the counts before checking out a submission for assessment.
        start_numbers = self.staff_area_page.available_checked_out_numbers
        # Check out a submission.
        self.staff_area_page.expand_staff_grading_section()
        # Checked out number should increase, ungraded decrease.
        ungraded = start_numbers[0]-1
        checked_out = start_numbers[1]+1
        self.staff_area_page.verify_available_checked_out_numbers((ungraded, checked_out))
        assessed = 0
        while number_to_assess == 0 or assessed < number_to_assess:
            continue_after = False if number_to_assess-1 == assessed else ungraded > 0
            self.staff_area_page.staff_assess(options_selected, "full-grade", continue_after)
            assessed += 1
            if not continue_after:
                self.staff_area_page.verify_available_checked_out_numbers((ungraded, checked_out-1))
                break
            else:
                ungraded -= 1
                self.staff_area_page.verify_available_checked_out_numbers((ungraded, checked_out))

    def refresh_page(self):
        """
        Helper method that waits for "unsaved changes" warnings to clear before refreshing the page.
        """
        EmptyPromise(
            lambda: self.browser.execute_script("return window.onbeforeunload === null"),
            "Unsubmitted changes exist on page."
        ).fulfill()
        self.browser.refresh()
Code Example #23
def bench_publish(endpoint, appkey, channel, size, profile, ack=True):
    publisher = satori.rtm.connection.Connection(endpoint + '?appkey=' +
                                                 appkey)
    publisher.start()

    message = binascii.hexlify(os.urandom(size // 2)).decode('ascii')
    print('Message size is {}'.format(len(message)))

    last_usage = [resource.getrusage(resource.RUSAGE_SELF)]
    print(
        'Duration, s\tRate, msgs/s\tMax RSS, MB\tUser time, s\tSystem time, s')

    def report(duration, count):
        usage = resource.getrusage(resource.RUSAGE_SELF)
        maxrss = usage.ru_maxrss // 1024
        if sys.platform == 'darwin':
            maxrss = maxrss // 1024
        print('{0:2.2f}\t\t{1}\t\t{2}\t\t{3:2.2f}\t\t{4:2.2f}'.format(
            duration, int(count / duration), maxrss,
            usage.ru_utime - last_usage[0].ru_utime,
            usage.ru_stime - last_usage[0].ru_stime))
        sys.stdout.flush()
        last_usage[0] = usage

    count = [0]

    def publish_without_ack():
        publisher.publish(channel, message)
        count[0] += 1

    def publish_with_ack():
        def callback(ack):
            count[0] += 1

        publisher.publish(channel, message, callback)

    publish = publish_with_ack if ack else publish_without_ack

    before = time.time()
    try:
        if profile:
            profiler = Profiler()
            profiler.start()
        while True:
            now = time.time()
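            # sampling_interval is assumed to be a module-level constant
            # (seconds between reports) defined elsewhere in this script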
            if now - before >= sampling_interval:
                report(now - before, count[0])
                if profile:
                    profiler.stop()
                    print(profiler.output_text(unicode=True, color=True))
                    profiler = Profiler()
                    profiler.start()
                count[0] = 0
                before = time.time()
            publish()
    except KeyboardInterrupt:
        sys.exit(0)
Code Example #24
File: impala.py Project: yushu-liu/adeptRL
def main(args):
    # host needs to broadcast timestamp so all procs create the same log dir
    if rank == 0:
        timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
        log_id = make_log_id_from_timestamp(
            args.tag, args.mode_name, args.agent,
            args.vision_network + args.network_body, timestamp)
        log_id_dir = os.path.join(args.log_dir, args.env_id, log_id)
        os.makedirs(log_id_dir)
        saver = SimpleModelSaver(log_id_dir)
        print_ascii_logo()
    else:
        timestamp = None
    timestamp = comm.bcast(timestamp, root=0)

    if rank != 0:
        log_id = make_log_id_from_timestamp(
            args.tag, args.mode_name, args.agent,
            args.vision_network + args.network_body, timestamp)
        log_id_dir = os.path.join(args.log_dir, args.env_id, log_id)

    comm.Barrier()

    # construct env
    seed = args.seed if rank == 0 else args.seed + (
        args.nb_env * (rank - 1))  # unique seed per process
    env = make_env(args, seed)

    # construct network
    torch.manual_seed(args.seed)
    network_head_shapes = get_head_shapes(env.action_space, env.engine,
                                          args.agent)
    network = make_network(env.observation_space, network_head_shapes, args)

    # sync network params
    if rank == 0:
        for v in network.parameters():
            comm.Bcast(v.detach().cpu().numpy(), root=0)
        print('Root variables synced')
    else:
        # can just use the numpy buffers
        variables = [v.detach().cpu().numpy() for v in network.parameters()]
        for v in variables:
            comm.Bcast(v, root=0)
        for shared_v, model_v in zip(variables, network.parameters()):
            model_v.data.copy_(torch.from_numpy(shared_v), non_blocking=True)
        print('{} variables synced'.format(rank))

    # construct agent
    # host is always the first gpu, workers are distributed evenly across the rest
    if len(args.gpu_id) > 1:  # nargs is always a list
        if rank == 0:
            gpu_id = args.gpu_id[0]
        else:
            gpu_id = args.gpu_id[1:][(rank - 1) % len(args.gpu_id[1:])]
    else:
        gpu_id = args.gpu_id[-1]
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    cudnn = True
    # disable cudnn for dynamic batches
    if rank == 0 and args.max_dynamic_batch > 0:
        cudnn = False

    torch.backends.cudnn.benchmark = cudnn
    agent = make_agent(network, device, env.engine, env.gpu_preprocessor, args)

    # workers
    if rank != 0:
        logger = make_logger(
            'ImpalaWorker{}'.format(rank),
            os.path.join(log_id_dir, 'train_log{}.txt'.format(rank)))
        summary_writer = SummaryWriter(os.path.join(log_id_dir, str(rank)))
        container = ImpalaWorker(agent,
                                 env,
                                 args.nb_env,
                                 logger,
                                 summary_writer,
                                 use_local_buffers=args.use_local_buffers)

        # Run the container
        if args.profile:
            try:
                from pyinstrument import Profiler
            except ImportError:
                raise ImportError(
                    'You must install pyinstrument to use profiling.')
            profiler = Profiler()
            profiler.start()
            container.run()
            profiler.stop()
            print(profiler.output_text(unicode=True, color=True))
        else:
            container.run()
        env.close()
    # host
    else:
        logger = make_logger(
            'ImpalaHost',
            os.path.join(log_id_dir, 'train_log{}.txt'.format(rank)))
        summary_writer = SummaryWriter(os.path.join(log_id_dir, str(rank)))
        log_args(logger, args)
        write_args_file(log_id_dir, args)
        logger.info('Network Parameter Count: {}'.format(
            count_parameters(network)))

        # no need for the env anymore
        env.close()

        # Construct the optimizer
        def make_optimizer(params):
            opt = torch.optim.RMSprop(params,
                                      lr=args.learning_rate,
                                      eps=1e-5,
                                      alpha=0.99)
            return opt

        container = ImpalaHost(agent,
                               comm,
                               make_optimizer,
                               summary_writer,
                               args.summary_frequency,
                               saver,
                               args.epoch_len,
                               args.host_training_info_interval,
                               use_local_buffers=args.use_local_buffers)

        # Run the container
        if args.profile:
            try:
                from pyinstrument import Profiler
            except ImportError:
                raise ImportError(
                    'You must install pyinstrument to use profiling.')
            profiler = Profiler()
            profiler.start()
            if args.max_dynamic_batch > 0:
                container.run(args.max_dynamic_batch,
                              args.max_queue_length,
                              args.max_train_steps,
                              dynamic=True,
                              min_dynamic_batch=args.min_dynamic_batch)
            else:
                container.run(args.num_rollouts_in_batch,
                              args.max_queue_length, args.max_train_steps)
            profiler.stop()
            print(profiler.output_text(unicode=True, color=True))
        else:
            if args.max_dynamic_batch > 0:
                container.run(args.max_dynamic_batch,
                              args.max_queue_length,
                              args.max_train_steps,
                              dynamic=True,
                              min_dynamic_batch=args.min_dynamic_batch)
            else:
                container.run(args.num_rollouts_in_batch,
                              args.max_queue_length, args.max_train_steps)
Code Example #25
 def __enter__(self):
     if self.profile:
         self.profiler = profiler = Profiler()
         profiler.start()
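The matching __exit__ is not shown in this excerpt; presumably it stops the profiler and reports the result, along these lines (a sketch under that assumption):

 def __exit__(self, exc_type, exc_value, exc_tb):
     if self.profile:
         self.profiler.stop()
         print(self.profiler.output_text(unicode=True, color=True))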
Code Example #26
def test_calculate_efh(profile=False):

    slope_angle = -15.0
    approach_len = 40
    takeoff_angle = 25.0
    fall_height = 0.5
    skier = Skier()

    slope, approach, takeoff, landing, landing_trans, flight, outputs = \
        make_jump(slope_angle, 0.0, approach_len, takeoff_angle, fall_height)

    if profile:
        from pyinstrument import Profiler
        p = Profiler()
        p.start()

    dist, efh, speeds = landing.calculate_efh(np.deg2rad(takeoff_angle),
                                              takeoff.end, skier)
    if profile:
        p.stop()
        print(p.output_text(unicode=True, color=True))

    expected_speeds = \
        np.array([ 0.        ,  0.64634268,  1.2356876 ,  1.76885108,  2.24964887,
                   2.69168606,  3.09866358,  3.47053895,  3.81252508,  4.12895563,
                   4.42290851,  4.69632597,  4.95135992,  5.1898158 ,  5.41341136,
                   5.62352591,  5.82141688,  6.00814636,  6.18473831,  6.35203728,
                   6.51082732,  6.66178867,  6.80554642,  6.94264205,  7.07360671,
                   7.19885863,  7.31883812,  7.43389184,  7.54435492,  7.65055735,
                   7.75276447,  7.85122643,  7.94619036,  8.03785699,  8.12644327,
                   8.21211438,  8.29503769,  8.37538282,  8.45328143,  8.5288697 ,
                   8.60226895,  8.67359403,  8.74294598,  8.81043762,  8.87615365,
                   8.94017827,  9.00259044,  9.06346839,  9.12288563,  9.18090629,
                   9.2375861 ,  9.29299052,  9.34717415,  9.40018621,  9.45207528,
                   9.50288513,  9.55266139,  9.60144541,  9.64915056,  9.69601049,
                   9.74202917,  9.78719679,  9.83154369,  9.87510007,  9.91785869,
                   9.95991937, 10.00126913, 10.04193202, 10.08192875, 10.12129032,
                  10.16002962, 10.19816754, 10.23572199, 10.27229304, 10.30913058,
                  10.34504771, 10.3800144 , 10.41491013, 10.44931605, 10.4832593 ,
                  10.51674639, 10.54978999, 10.5824021 , 10.61459589, 10.64642594])

    np.testing.assert_allclose(np.diff(dist), 0.2 * np.ones(len(dist) - 1))
    np.testing.assert_allclose(efh[0], 0.0)
    np.testing.assert_allclose(efh[1:], fall_height, rtol=0.0, atol=8e-3)
    np.testing.assert_allclose(speeds,
                               expected_speeds,
                               rtol=3.0e-5,
                               atol=3.0e-4)

    dist, _, _ = landing.calculate_efh(np.deg2rad(takeoff_angle),
                                       takeoff.end,
                                       skier,
                                       increment=0.1)
    np.testing.assert_allclose(np.diff(dist), 0.1 * np.ones(len(dist) - 1))

    # Check if a surface that is before the takeoff point gives an error
    with pytest.raises(InvalidJumpError):
        dist, _, _ = takeoff.calculate_efh(np.deg2rad(takeoff_angle),
                                           takeoff.end, skier)

    # Create a surface with takeoff and landing to check if function only
    # calculates takeoff point and beyond
    x = np.concatenate([takeoff.x, landing.x])
    y = np.concatenate([takeoff.y, landing.y])
    new_surf = Surface(x, y)
    dist, efh, _ = new_surf.calculate_efh(np.deg2rad(takeoff_angle),
                                          takeoff.end, skier)
    np.testing.assert_allclose(efh[0], 0.0)
    np.testing.assert_allclose(efh[1:], fall_height, rtol=0.0, atol=8e-3)
    np.testing.assert_allclose(np.diff(dist), 0.2 * np.ones(len(dist) - 1))

    # Create a surface where distance values are not monotonic
    with pytest.raises(InvalidJumpError):
        Surface([2, 1, 3], [2, 4, 5])

    # Test takeoff angle greater than pi/2
    with pytest.raises(InvalidJumpError):
        new_surf.calculate_efh(np.pi, takeoff.end, skier)

    # Test function when takeoff point is in the first quadrant relative to
    # initial takeoff point (takeoff.end)
    takeoff_quad1 = (landing.start[0] + 2, landing.start[1] + 2)
    _, efh1, _ = landing.calculate_efh(np.deg2rad(takeoff_angle),
                                       takeoff_quad1,
                                       skier,
                                       increment=0.2)
    expected_quad1 = \
        np.array([1.7835165, 1.78401272, 1.78217823, 1.77987242, 1.77628465,
                  1.76998945, 1.76337001, 1.75578132, 1.7473959, 1.73834729,
                  1.72865671, 1.71840936, 1.70768518, 1.69658602, 1.68518452,
                  1.67349779, 1.66157619, 1.64946937, 1.6372259, 1.62488912,
                  1.61246859, 1.59999239, 1.58749806, 1.57501062, 1.5625446,
                  1.55011972, 1.53774518, 1.52543527, 1.51321161, 1.50108674,
                  1.48906131, 1.47714836, 1.4653533, 1.45368229, 1.44214059,
                  1.43073275, 1.41946312, 1.40833569, 1.3973575, 1.38652761,
                  1.37583968, 1.3653022, 1.35491758, 1.34467922, 1.33459477,
                  1.3246687, 1.314938, 1.3053352, 1.2958138, 1.28635201,
                  1.27717301, 1.26814732, 1.25926159, 1.25050445, 1.24186084,
                  1.2333902, 1.22506106, 1.21686473, 1.20880351, 1.20087464,
                  1.19308445, 1.18542684, 1.17789294, 1.17046884, 1.16317604,
                  1.15598451, 1.14875674, 1.14161781, 1.13468529, 1.12802913,
                  1.12142533, 1.11492911, 1.1085392, 1.1022529, 1.09629121])
    np.testing.assert_allclose(expected_quad1, efh1, rtol=1e-3)

    # Test function quadrant 2, negative takeoff angle, skier reaches 100mph
    takeoff_quad2 = (landing.start[0] - 2, landing.start[1] + 2)
    _, efh_speed, _ = landing.calculate_efh(np.deg2rad(-takeoff_angle),
                                            takeoff_quad2,
                                            skier,
                                            increment=0.2)
    expected_speedskier = \
        np.array([2.19864804, 2.81796169, 3.02391351, 3.06098472, 3.37912743,
                  3.79123023, 4.43642894, 5.67024268, 9.02957195, np.nan,
                  np.nan, np.nan, np.nan, np.nan, np.nan,
                  np.nan, np.nan, np.nan, np.nan, np.nan,
                  np.nan, np.nan, np.nan, np.nan, np.nan,
                  np.nan, np.nan, np.nan, np.nan, np.nan,
                  np.nan, np.nan, np.nan, np.nan, np.nan,
                  np.nan, np.nan, np.nan, np.nan, np.nan,
                  np.nan, np.nan, np.nan, np.nan, np.nan,
                  np.nan, np.nan, np.nan, np.nan, np.nan,
                  np.nan, np.nan, np.nan, np.nan, np.nan,
                  np.nan, np.nan, np.nan, np.nan, np.nan,
                  np.nan, np.nan, np.nan, np.nan, np.nan,
                  np.nan, np.nan, np.nan, np.nan, np.nan,
                  np.nan, np.nan, np.nan, np.nan, np.nan,
                  np.nan, np.nan, np.nan, np.nan, np.nan,
                  np.nan, np.nan, np.nan, np.nan, np.nan])
    np.testing.assert_allclose(expected_speedskier, efh_speed, rtol=1e-3)

    # Test quadrant 2, positive takeoff angle
    _, efh2, _ = landing.calculate_efh(np.deg2rad(takeoff_angle),
                                       takeoff_quad2,
                                       skier,
                                       increment=0.2)
    expected_quad2 = \
        np.array([2.06869294, 2.32862611, 2.3347512, 2.26367959, 2.26529656,
                  2.24632669, 2.21713456, 2.18302593, 2.1512251, 2.12735662,
                  2.09678855, 2.06501121, 2.03247095, 1.99966889, 1.96699731,
                  1.93383061, 1.90044012, 1.86702338, 1.83375702, 1.80080359,
                  1.76806412, 1.7356011, 1.70352141, 1.67190225, 1.64082434,
                  1.6102607, 1.58024025, 1.5507907, 1.52196707, 1.49379066,
                  1.46623922, 1.43932379, 1.41305071, 1.38744381, 1.36250567,
                  1.33821319, 1.31455562, 1.29153864, 1.269153, 1.24739955,
                  1.22625899, 1.20571586, 1.18576279, 1.16638949, 1.14758039,
                  1.12932359, 1.11160082, 1.0944017, 1.07765136, 1.06159372,
                  1.0458878, 1.03062399, 1.01586807, 1.00152386, 0.98760244,
                  0.97411286, 0.96106147, 0.94838167, 0.93598971, 0.92389684,
                  0.91232011, 0.90106331, 0.8901389, 0.87949807, 0.86953028,
                  0.85918949, 0.84947943, 0.8403958, 0.83124043, 0.82234776,
                  0.81371588, 0.8053377, 0.79719569, 0.78926783, 0.78157332,
                  0.77407325, 0.76664867, 0.75940032, 0.75243023, 0.7457996,
                  0.73929607, 0.73297045, 0.72681849, 0.72083236, 0.71508179])
    np.testing.assert_allclose(expected_quad2, efh2, rtol=1e-3)

    # Test quadrant 2, negative takeoff angle less than 45
    with pytest.raises(InvalidJumpError):
        dist, _, _ = landing.calculate_efh(np.deg2rad(-46),
                                           takeoff_quad2,
                                           skier,
                                           increment=0.2)

    # Test function quadrant 3
    takeoff_quad3 = (landing.start[0] - 2, landing.start[1] - 2)
    with pytest.raises(InvalidJumpError):
        dist, _, _ = landing.calculate_efh(np.deg2rad(takeoff_angle),
                                           takeoff_quad3,
                                           skier,
                                           increment=0.2)

    # Test function quadrant 4
    takeoff_quad4 = (landing.start[0] + 2, landing.start[1] - 2)
    with pytest.raises(InvalidJumpError):
        dist, _, _ = landing.calculate_efh(np.deg2rad(takeoff_angle),
                                           takeoff_quad4,
                                           skier,
                                           increment=0.2)
Code Example #27
def main():
    usage = "usage: %prog [-h] [-o output_file_path] scriptfile [arg] ..."
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False
    parser.add_option('',
                      '--html',
                      dest="output_html",
                      action='store_true',
                      help="output HTML instead of text",
                      default=False)
    parser.add_option('-o',
                      '--outfile',
                      dest="outfile",
                      action='store',
                      help="save stats to <outfile>",
                      default=None)

    if not sys.argv[1:]:
        parser.print_usage()
        sys.exit(2)

    (options, args) = parser.parse_args()
    sys.argv[:] = args

    if len(args) > 0:
        progname = args[0]
        sys.path.insert(0, os.path.dirname(progname))

        with open(progname, 'rb') as fp:
            code = compile(fp.read(), progname, 'exec')
        globs = {
            '__file__': progname,
            '__name__': '__main__',
            '__package__': None,
        }

        profiler = Profiler()
        profiler.start()

        try:
            exec(code, globs, None)
        except (SystemExit, KeyboardInterrupt):
            pass

        profiler.stop()

        if options.outfile:
            f = codecs.open(options.outfile, 'w', 'utf-8')
            unicode = True
            color = False
        else:
            f = sys.stdout
            unicode = stdout_supports_unicode()
            color = stdout_supports_color()

        if options.output_html:
            f.write(profiler.output_html())
        else:
            f.write(profiler.output_text(unicode=unicode, color=color))

        f.close()
Code Example #28
 def before_request():
     if "profile" in request.args:
         g.profiler = Profiler()
         g.profiler.start()
Code Example #29
 def before_request():
     if "profiler" not in g:
         g.profiler = Profiler()
         g.profiler_starttime = time.time()
         g.profiler.start()
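Both hooks above only start a profiler; a matching after_request hook would stop it and return the report, for example (a sketch, assuming a standard Flask app that registers the hook with app.after_request):

from flask import g, make_response

def after_request(response):
    if "profiler" in g:
        g.profiler.stop()
        return make_response(g.profiler.output_html())
    return response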
Code Example #30
File: main.py Project: willu47/energy_demand
    # Generate dwelling stocks over whole simulation period
    base_data['rs_dw_stock'] = dw_stock.rs_dw_stock(base_data['lu_reg'], base_data)
    base_data['ss_dw_stock'] = dw_stock.ss_dw_stock(base_data['lu_reg'], base_data)

    results_every_year = []
    for sim_yr in base_data['sim_param']['sim_period']:
        base_data['sim_param']['curr_yr'] = sim_yr

        print("-------------------------- ")
        print("SIM RUN:  " + str(sim_yr))
        print("-------------------------- ")

        #-------------PROFILER
        if instrument_profiler:
            from pyinstrument import Profiler
            profiler = Profiler(use_signal=False)
            profiler.start()

        _, model_run_object = energy_demand_model(base_data)

        if instrument_profiler:
            profiler.stop()
            print("Profiler Results")
            print(profiler.output_text(unicode=True, color=True))

        results_every_year.append(model_run_object)

        # ---------------------------------------------------
        # Validation of national electricity demand for base year
        # ---------------------------------------------------
Code Example #31
class BenchmarkPtycho(unittest.TestCase):
    """Run benchmarks for ptychography reconstruction."""

    def setUp(self):
        """Create a test dataset."""
        self.profiler = Profiler()
        dataset_file = '../tests/data/ptycho_setup.pickle.lzma'
        with lzma.open(dataset_file, 'rb') as file:
            [
                self.data,
                self.scan,
                self.probe,
                self.original,
            ] = pickle.load(file)

    @unittest.skip('Demonstrate skipped tests.')
    def test_never(self):
        """Never run this test."""
        pass

    def template_algorithm(self, algorithm):
        """Use pyinstrument to benchmark a ptycho algorithm on one core."""
        logging.disable(logging.WARNING)
        result = {
            'psi': np.ones_like(self.original),
            'probe': self.probe,
            'scan': self.scan,
        }
        # Do one iteration to complete JIT compilation
        result = tike.ptycho.reconstruct(
            **result,
            data=self.data,
            algorithm=algorithm,
            num_iter=1,
            rtol=-1,
        )
        self.profiler.start()
        result = tike.ptycho.reconstruct(
            **result,
            data=self.data,
            algorithm=algorithm,
            num_iter=50,
            rtol=-1,
        )
        self.profiler.stop()
        print('\n')
        print(self.profiler.output_text(
            unicode=True,
            color=True,
        ))

    def test_combined(self):
        """Use pyinstrument to benchmark the combined algorithm."""
        self.template_algorithm('combined')

    def test_divided(self):
        """Use pyinstrument to benchmark the divided algorithm."""
        self.template_algorithm('divided')

    def test_admm(self):
        """Use pyinstrument to benchmark the admm algorithm."""
        self.template_algorithm('admm')
Code example #32
File: interrupt.py Project: asmeurer/pyinstrument
from pyinstrument import Profiler
from platform import platform

p = Profiler()

p.start()

def func():
    with open('/dev/urandom', 'rb') as fd:
        data = fd.read(1024*1024)

func()

# this failed on ubuntu 12.04 
platform()

p.stop()

print(p.output_text())

with open('ioerror_out.html', 'w') as f:
    f.write(p.output_html())
Code example #33
def run(args, device, data):
    # Unpack data
    train_nid, val_nid, test_nid, in_feats, n_classes, g = data
    # Create sampler
    sampler = NeighborSampler(
        g, [int(fanout) for fanout in args.fan_out.split(',')],
        dgl.distributed.sample_neighbors, device)

    # Create DataLoader for constructing blocks
    dataloader = DistDataLoader(dataset=train_nid.numpy(),
                                batch_size=args.batch_size,
                                collate_fn=sampler.sample_blocks,
                                shuffle=True,
                                drop_last=False)

    # Define model and optimizer
    model = DistSAGE(in_feats, args.num_hidden, n_classes, args.num_layers,
                     F.relu, args.dropout)
    model = model.to(device)
    if not args.standalone:
        if args.num_gpus == -1:
            model = th.nn.parallel.DistributedDataParallel(model)
        else:
            dev_id = g.rank() % args.num_gpus
            model = th.nn.parallel.DistributedDataParallel(
                model, device_ids=[dev_id], output_device=dev_id)
    loss_fcn = nn.CrossEntropyLoss()
    loss_fcn = loss_fcn.to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    train_size = th.sum(g.ndata['train_mask'][0:g.number_of_nodes()])

    # Training loop
    iter_tput = []
    profiler = Profiler()
    if not args.close_profiler:
        profiler.start()
    epoch = 0
    for epoch in range(args.num_epochs):
        tic = time.time()

        sample_time = 0
        forward_time = 0
        backward_time = 0
        update_time = 0
        num_seeds = 0
        num_inputs = 0
        start = time.time()
        # Loop over the dataloader to sample the computation dependency graph as a list of
        # blocks.
        step_time = []
        for step, blocks in enumerate(dataloader):
            tic_step = time.time()
            sample_time += tic_step - start

            # The nodes for input lie on the LHS side of the first block.
            # The nodes for output lie on the RHS side of the last block.
            batch_inputs = blocks[0].srcdata['features']
            batch_labels = blocks[-1].dstdata['labels']
            batch_labels = batch_labels.long()

            num_seeds += len(blocks[-1].dstdata[dgl.NID])
            num_inputs += len(blocks[0].srcdata[dgl.NID])
            blocks = [block.to(device) for block in blocks]
            batch_labels = batch_labels.to(device)
            # Compute loss and prediction
            start = time.time()
            batch_pred = model(blocks, batch_inputs)
            loss = loss_fcn(batch_pred, batch_labels)
            forward_end = time.time()
            optimizer.zero_grad()
            loss.backward()
            compute_end = time.time()
            forward_time += forward_end - start
            backward_time += compute_end - forward_end

            optimizer.step()
            update_time += time.time() - compute_end

            step_t = time.time() - tic_step
            step_time.append(step_t)
            iter_tput.append(len(blocks[-1].dstdata[dgl.NID]) / step_t)
            if step % args.log_every == 0:
                acc = compute_acc(batch_pred, batch_labels)
                gpu_mem_alloc = th.cuda.max_memory_allocated(
                ) / 1000000 if th.cuda.is_available() else 0
                print(
                    'Part {} | Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MiB | time {:.3f} s'
                    .format(g.rank(), epoch, step, loss.item(), acc.item(),
                            np.mean(iter_tput[3:]), gpu_mem_alloc,
                            np.sum(step_time[-args.log_every:])))
            start = time.time()

        toc = time.time()
        print(
            'Part {}, Epoch Time(s): {:.4f}, sample+data_copy: {:.4f}, forward: {:.4f}, backward: {:.4f}, update: {:.4f}, #seeds: {}, #inputs: {}'
            .format(g.rank(), toc - tic, sample_time, forward_time,
                    backward_time, update_time, num_seeds, num_inputs))
        epoch += 1

        if epoch % args.eval_every == 0 and epoch != 0:
            start = time.time()
            val_acc, test_acc = evaluate(model.module, g, g.ndata['features'],
                                         g.ndata['labels'], val_nid, test_nid,
                                         args.batch_size_eval, device)
            print('Part {}, Val Acc {:.4f}, Test Acc {:.4f}, time: {:.4f}'.
                  format(g.rank(), val_acc, test_acc,
                         time.time() - start))
    if not args.close_profiler:
        profiler.stop()
        print(profiler.output_text(unicode=True, color=True))
Code example #34
File: __main__.py Project: mcfletch/pyinstrument
        }

        profiler = Profiler()
        profiler.start()

        try:
            exec code in globs, None
        except (SystemExit, KeyboardInterrupt):
            pass

        profiler.stop()
        
        write_output(options, profiler)

    elif options.infile:
        profiler = Profiler()
        if options.infile in (b'-', '-'):
            fh = sys.stdin
        else:
            fh = codecs.open(options.infile, 'r', 'utf-8')
        try:
            content = fh.read()
        finally:
            fh.close()

        profiler.from_json(content)
        write_output(options, profiler)

    else:
        parser.print_usage()
    return parser
Code example #35
def __enter__(self):
    """Start the profiler on entering the context."""
    self.profiler = Profiler()  # or Profiler(use_signal=False), see below
    self.profiler.start()
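
The snippet ends at __enter__; a sketch of the matching __exit__, assuming the report should simply be printed when the with-block closes:

def __exit__(self, exc_type, exc_value, traceback):
    """Stop the profiler and print the report on exit."""
    self.profiler.stop()
    print(self.profiler.output_text(unicode=True, color=True))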
Code example #36
File: test_paths.py Project: skylouis/trimesh
    """
    # root count should be the same as the closed polygons
    assert len(path.root) == len(path.polygons_full)

    # make sure polygons are really polygons
    assert all(type(i).__name__ == 'Polygon'
               for i in path.polygons_full)
    assert all(type(i).__name__ == 'Polygon'
               for i in path.polygons_closed)

    # these should all correspond to each other
    assert len(path.discrete) == len(path.polygons_closed)
    assert len(path.discrete) == len(path.paths)

    # make sure None polygons are not referenced in graph
    assert all(path.polygons_closed[i] is not None
               for i in path.enclosure_directed.nodes())


if __name__ == '__main__':
    from pyinstrument import Profiler
    prof = Profiler()
    prof.start()

    # g.trimesh.util.attach_to_log()
    g.unittest.main()

    prof.stop()
    txt = prof.output_text(unicode=True, color=True)
    print(txt)
Code example #37
File: __main__.py Project: Wilo/pyinstrument
def main():
    usage = ("usage: python -m pyinstrument [options] scriptfile [arg] ...")
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False
    parser.add_option('', '--html',
        dest="output_html", action='store_true',
        help="output HTML instead of text", default=False)
    parser.add_option('-o', '--outfile',
        dest="outfile", action='store',
        help="save report to <outfile>", default=None)

    parser.add_option('', '--unicode',
        dest='unicode', action='store_true',
        help='force unicode text output')
    parser.add_option('', '--no-unicode',
        dest='unicode', action='store_false',
        help='force ascii text output')

    parser.add_option('', '--color',
        dest='color', action='store_true',
        help='force ansi color text output')
    parser.add_option('', '--no-color',
        dest='color', action='store_false',
        help='force no color text output')

    if not sys.argv[1:]:
        parser.print_help()
        sys.exit(2)

    (options, args) = parser.parse_args()
    sys.argv[:] = args

    if len(args) > 0:
        progname = args[0]
        sys.path.insert(0, os.path.dirname(progname))

        with open(progname, 'rb') as fp:
            code = compile(fp.read(), progname, 'exec')
        globs = {
            '__file__': progname,
            '__name__': '__main__',
            '__package__': None,
        }

        try:
            profiler = Profiler()
        except SignalUnavailableError:
            profiler = Profiler(use_signal=False)

        profiler.start()

        try:
            exec_(code, globs, None)
        except (SystemExit, KeyboardInterrupt):
            pass

        profiler.stop()

        if options.outfile:
            f = codecs.open(options.outfile, 'w', 'utf-8')
        else:
            f = sys.stdout

        unicode_override = options.unicode is not None
        color_override = options.color is not None

        unicode = options.unicode if unicode_override else file_supports_unicode(f)
        color = options.color if color_override else file_supports_color(f)

        if options.output_html:
            f.write(profiler.output_html())
        else:
            f.write(profiler.output_text(unicode=unicode, color=color))

        f.close()
    else:
        parser.print_usage()
    return parser
Code example #38
def before_request():
    g.profiler = Profiler()
    g.profiler.start()
Code example #39
File: grid_gcn4.py Project: honeyhaoyan/Grid_GCN
    def forward(self, pos, centroids, centroids_index, index_voxels, voxel_size, neighbour_voxel_list, mask):
        profiler = Profiler()
        profiler.start()
        device = pos.device
        B, N, _ = pos.shape
        center_pos = index_points(pos, centroids)
        _, S, _ = center_pos.shape
        group_idx = torch.ones(B, S, self.n_neighbor).to(device)
        i = 0

        #neighbour_movement_list = np.array([[-1,-1,-1],[-1,-1,0],[-1,-1,1],[-1,0,-1],[-1,0,0],[-1,0,1],[-1,1,-1],[-1,1,0],[-1,1,1],[0,-1,-1],[0,-1,0],[0,-1,1],[0,0,-1],[0,0,0],[0,0,1],[0,1,-1],[0,1,0],[0,1,1],[1,-1,-1],[1,-1,0],[1,-1,1],[1,0,-1],[1,0,0],[1,0,1],[1,1,-1],[1,1,0],[1,1,1]])
        #print(neighbour_movement_list)
        

        for batch in center_pos:
            print(i)
            voxel_set = set()
            voxels = index_voxels[i]
            j = 0
            
            #center_voxel_id = get_voxel_id(center)
            #sorted_v_id, sorted_c_id = sort(cat(center_voxel_id, center))
            #for center in sorted_c_id:
            #    if current_v_id != last_v_id:
            #        preprocess
            #    sampling
            
            center_voxel_id = (batch*(voxel_size-1)).int()
            #print(center_voxel_id)
            #print(center_voxel_id.size())
            #print(center_voxel_id)

            new_center_voxel_id = center_voxel_id[:,0]*10000+center_voxel_id[:,1]*100+center_voxel_id[:,2]

            sorted_centers, center_indexes = torch.sort(new_center_voxel_id)
            #for item in sorted_centers:
            #    print(item)
            #print(sorted_centers)
            #print(center_indexes)
            
            current_voxel = None
            current_context_points = []
            j = 0
            for index in center_indexes:
                self_voxel = center_voxel_id[index]
                #print(self_voxel)
                if (current_voxel is not None) and torch.all(torch.eq(self_voxel, current_voxel)):
                    self_context_points = current_context_points
                else:
                    #self_neighbour_voxels = neighbour_voxel_list[i].get(tuple(self_voxel))
                    x_1 = self_voxel[0].item()
                    y_1 = self_voxel[1].item()
                    z_1 = self_voxel[2].item()
                    self_neighbour_voxels = neighbour_voxel_list[i][x_1][y_1][z_1]
                    current_context_points = []
                    for voxel in self_neighbour_voxels:
                        #voxel = voxel.int()
                        #print(voxel)
                        x = voxel[0].item()
                        y = voxel[1].item()
                        z = voxel[2].item()
                        if (x<0 or x>39 or y<0 or y>39 or z<0 or z>39):
                            continue
                        if (mask[i][x][y][z].item()==0):
                            continue
                        points = voxels.get((x,y,z))
                        #current_context_points = []
                        #for point in points:
                        #    current_context_points.append(point)
                        #print("current context points")
                        #print(current_context_points)
                        current_context_points+=points
                        #print(current_context_points)
                        self_context_points = current_context_points
                k = 0
                if (len(self_context_points)>self.n_neighbor):
                    self_context_points = random.sample(self_context_points,self.n_neighbor)
                if self_context_points:
                    # delete for 
                    #for item in self_context_points:
                    #    group_idx[i][index][k] = item
                    #    k = k+1
                    #print("group idx pre")
                    #print(group_idx[i][index])
                    group_idx[i][index][0:(len(self_context_points))] = torch.FloatTensor(self_context_points).to(device)
                    #print(group_idx[i][index])
                #while (k<self.n_neighbor):
                #    group_idx[i][index][k] = centroids[i][index]
                #    k = k+1
                #print("group idx after")
                #print(group_idx[i][index])
                if (len(self_context_points)<self.n_neighbor):
                    group_idx[i][index][len(self_context_points):(self.n_neighbor)] = centroids[i][index]
                #print(group_idx[i][index])
                
                j = j+1
                
                        

            i = i+1

        group_idx = group_idx.float().to(device)
        print(group_idx.shape)

        profiler.stop()

        print(profiler.output_text(unicode=True, color=True, show_all=True))
        return group_idx
Code example #40
def wrapper(*args, **kwargs):
    profiler = Profiler()
    profiler.start()
    result = func(*args, **kwargs)
    profiler.stop()
    print(profiler.output_text(unicode=False, color=True))
    return result
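
The wrapper above is only the inner half of a decorator; a self-contained sketch of the whole thing (the name profiled and the functools.wraps call are assumptions, not part of the original snippet):

import functools

from pyinstrument import Profiler

def profiled(func):
    """Print a pyinstrument report every time func is called."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        profiler = Profiler()
        profiler.start()
        result = func(*args, **kwargs)
        profiler.stop()
        print(profiler.output_text(unicode=False, color=True))
        return result
    return wrapper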
Code example #41
File: app.py Project: keur/moffle
def inject_profiler():
    request.profiler = Profiler(use_signal=False)
    request.profiler.start()
Code example #42
import sys
import time
import random
import numpy as np
from array import *
from math import ceil, floor, sqrt
#import plotly.plotly as py
#import plotly.tools as tls	
from pyinstrument import Profiler

profiler = Profiler()
profiler.start()

random.seed(420)

if len(sys.argv) < 3:
	sys.exit("usage: python[3] simSerial_singleNeighborCalc.py [num lattice points each side of square] [num timesteps]")

N = int(sys.argv[1])
t = int(sys.argv[2])


class Lattice:
	#static 2D array data structure for lattices
	dirt = []

	# states
	# 0: dry
	# 1: saturated on top, no water on top
	# 2: not saturated, water on top
Code example #43
def main():
    """Command-line utility for using (and testing) s3 utility methods."""
    logging.basicConfig(level=logging.DEBUG)
    arg_parser = argparse.ArgumentParser(description='Perform obfuscation of forum .mongo dump files.')

    arg_parser.add_argument(
        'input',
        help='Read mongo files from this location.',
    )
    arg_parser.add_argument(
        '-o', '--output',
        help='Write obfuscated mongo files to this location in the local file system.',
        default=None
    )
    arg_parser.add_argument(
        '-u', '--userinfo',
        help='For events, read a custom user-info file from the local fs that contains username, email, user-id, fullname.',
        default=None
    )
    arg_parser.add_argument(
        '--log-context',
        help='characters on each side of match',
        type=int,
        default=50,
    )
    #####################
    # Flags to indicate what to obfuscate.
    #####################
    arg_parser.add_argument(
        '--forum',
        help='Read in and obfuscate forum posts.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--wiki',
        help='Read in and obfuscate wiki documents.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--courseware',
        help='Read in and obfuscate courseware_studentmodule records.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--event',
        help='Read in and obfuscate events.',
        action='store_true',
    )

    #####################
    # Various flags to indicate what to look for.
    #####################
    arg_parser.add_argument(
        '--phone',
        help='Extract phone numbers',
        action='store_true',
    )
    arg_parser.add_argument(
        '--possible-phone',
        help='Extract possible phone numbers',
        action='store_true',
    )
    arg_parser.add_argument(
        '--email',
        help='Extract email addresses',
        action='store_true',
    )
    arg_parser.add_argument(
        '--phone-context',
        help='Extract phone number context',
        action='store_true',
    )
    arg_parser.add_argument(
        '--email-context',
        help='Extract email address context',
        action='store_true',
    )
    arg_parser.add_argument(
        '--name-context',
        help='Extract name context',
        action='store_true',
    )
    arg_parser.add_argument(
        '--facebook',
        help='Extract facebook urls',
        action='store_true',
    )
    arg_parser.add_argument(
        '--username',
        help='Extract username',
        action='store_true',
    )
    arg_parser.add_argument(
        '--fullname',
        help='Extract fullname.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--userid',
        help='Extract user-id.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--skip-post',
        help='Skip performing filtering on event.POST entries.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--pyinstrument',
        help='Profile the run and write the output to stderr',
        action='store_true'
    )
    args = arg_parser.parse_args()
    kwargs = vars(args)

    profiler = None
    if args.pyinstrument:
        profiler = Profiler()  # or Profiler(use_signal=False), see below
        profiler.start()

    try:
        obfuscator = BulkObfuscator(**kwargs)
        obfuscator.obfuscate_directory(args.input, args.output)
    finally:
        if profiler:
            profiler.stop()
            print >>sys.stderr, profiler.output_text(unicode=True, color=True)
Code example #44
def main():
    """Command-line utility for using (and testing) s3 utility methods."""
    logging.basicConfig(level=logging.DEBUG)
    arg_parser = argparse.ArgumentParser(
        description='Perform obfuscation of forum .mongo dump files.')

    arg_parser.add_argument(
        'input',
        help='Read mongo files from this location.',
    )
    arg_parser.add_argument(
        '-o',
        '--output',
        help=
        'Write obfuscated mongo files to this location in the local file system.',
        default=None)
    arg_parser.add_argument(
        '-u',
        '--userinfo',
        help=
        'For events, read a custom user-info file from the local fs that contains username, email, user-id, fullname.',
        default=None)
    arg_parser.add_argument(
        '--log-context',
        help='characters on each side of match',
        type=int,
        default=50,
    )
    #####################
    # Flags to indicate what to obfuscate.
    #####################
    arg_parser.add_argument(
        '--forum',
        help='Read in and obfuscate forum posts.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--wiki',
        help='Read in and obfuscate wiki documents.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--courseware',
        help='Read in and obfuscate courseware_studentmodule records.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--event',
        help='Read in and obfuscate events.',
        action='store_true',
    )

    #####################
    # Various flags to indicate what to look for.
    #####################
    arg_parser.add_argument(
        '--phone',
        help='Extract phone numbers',
        action='store_true',
    )
    arg_parser.add_argument(
        '--possible-phone',
        help='Extract possible phone numbers',
        action='store_true',
    )
    arg_parser.add_argument(
        '--email',
        help='Extract email addresses',
        action='store_true',
    )
    arg_parser.add_argument(
        '--phone-context',
        help='Extract phone number context',
        action='store_true',
    )
    arg_parser.add_argument(
        '--email-context',
        help='Extract email address context',
        action='store_true',
    )
    arg_parser.add_argument(
        '--name-context',
        help='Extract name context',
        action='store_true',
    )
    arg_parser.add_argument(
        '--facebook',
        help='Extract facebook urls',
        action='store_true',
    )
    arg_parser.add_argument(
        '--username',
        help='Extract username',
        action='store_true',
    )
    arg_parser.add_argument(
        '--fullname',
        help='Extract fullname.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--userid',
        help='Extract user-id.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--skip-post',
        help='Skip performing filtering on event.POST entries.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--pyinstrument',
        help='Profile the run and write the output to stderr',
        action='store_true')
    args = arg_parser.parse_args()
    kwargs = vars(args)

    profiler = None
    if args.pyinstrument:
        profiler = Profiler()  # or Profiler(use_signal=False), see below
        profiler.start()

    try:
        obfuscator = BulkObfuscator(**kwargs)
        obfuscator.obfuscate_directory(args.input, args.output)
    finally:
        if profiler:
            profiler.stop()
            print >> sys.stderr, profiler.output_text(unicode=True, color=True)
Code example #45
def instrument(
        cls_runner: tp.Type[Perf],
        pattern_func: str,
        timeline: bool = False,
        ) -> None:
    '''
    Profile the `sf` function from the supplied class.
    '''
    runner = cls_runner()
    for name in runner.iter_function_names(pattern_func):
        f = getattr(runner, name)
        profiler = Profiler(interval=0.0001) # default is 0.001, 1 ms

        if timeline:
            profiler.start()
            f()
            profiler.stop()
        else:
            profiler.start()
            for _ in range(runner.NUMBER):
                f()
            profiler.stop()

        print(profiler.output_text(unicode=True, color=True, timeline=timeline, show_all=True))
Code example #46
File: train_dist.py Project: vickysvicky/dgl
def run(args, device, data):
    # Unpack data
    train_nid, val_nid, in_feats, n_classes, g = data
    # Create sampler
    sampler = NeighborSampler(
        g, [int(fanout) for fanout in args.fan_out.split(',')],
        dgl.distributed.sample_neighbors)

    # Create PyTorch DataLoader for constructing blocks
    dataloader = DataLoader(dataset=train_nid.numpy(),
                            batch_size=args.batch_size,
                            collate_fn=sampler.sample_blocks,
                            shuffle=True,
                            drop_last=False,
                            num_workers=args.num_workers)

    # Define model and optimizer
    model = SAGE(in_feats, args.num_hidden, n_classes, args.num_layers, F.relu,
                 args.dropout)
    model = model.to(device)
    model = th.nn.parallel.DistributedDataParallel(model)
    loss_fcn = nn.CrossEntropyLoss()
    loss_fcn = loss_fcn.to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    train_size = th.sum(g.ndata['train_mask'][0:g.number_of_nodes()])

    # Training loop
    iter_tput = []
    profiler = Profiler()
    profiler.start()
    epoch = 0
    for epoch in range(args.num_epochs):
        tic = time.time()

        sample_time = 0
        copy_time = 0
        forward_time = 0
        backward_time = 0
        update_time = 0
        num_seeds = 0
        num_inputs = 0
        start = time.time()
        # Loop over the dataloader to sample the computation dependency graph as a list of
        # blocks.
        step_time = []
        for step, blocks in enumerate(dataloader):
            tic_step = time.time()
            sample_time += tic_step - start

            # The nodes for input lie on the LHS side of the first block.
            # The nodes for output lie on the RHS side of the last block.
            input_nodes = blocks[0].srcdata[dgl.NID]
            seeds = blocks[-1].dstdata[dgl.NID]

            # Load the input features as well as output labels
            start = time.time()
            batch_inputs, batch_labels = load_subtensor(
                g, seeds, input_nodes, device)
            copy_time += time.time() - start

            num_seeds += len(blocks[-1].dstdata[dgl.NID])
            num_inputs += len(blocks[0].srcdata[dgl.NID])
            # Compute loss and prediction
            start = time.time()
            batch_pred = model(blocks, batch_inputs)
            loss = loss_fcn(batch_pred, batch_labels)
            forward_end = time.time()
            optimizer.zero_grad()
            loss.backward()
            compute_end = time.time()
            forward_time += forward_end - start
            backward_time += compute_end - forward_end

            # Aggregate gradients in multiple nodes.
            for param in model.parameters():
                if param.requires_grad and param.grad is not None:
                    th.distributed.all_reduce(param.grad.data,
                                              op=th.distributed.ReduceOp.SUM)
                    param.grad.data /= args.num_client

            optimizer.step()
            update_time += time.time() - compute_end

            step_t = time.time() - tic_step
            step_time.append(step_t)
            iter_tput.append(num_seeds / (step_t))
            if step % args.log_every == 0:
                acc = compute_acc(batch_pred, batch_labels)
                gpu_mem_alloc = th.cuda.max_memory_allocated(
                ) / 1000000 if th.cuda.is_available() else 0
                print(
                    'Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MiB | time {:.3f} s'
                    .format(epoch, step, loss.item(), acc.item(),
                            np.mean(iter_tput[3:]), gpu_mem_alloc,
                            np.sum(step_time[-args.log_every:])))
            start = time.time()

        toc = time.time()
        print(
            'Epoch Time(s): {:.4f}, sample: {:.4f}, data copy: {:.4f}, forward: {:.4f}, backward: {:.4f}, update: {:.4f}, #seeds: {}, #inputs: {}'
            .format(toc - tic, sample_time, copy_time, forward_time,
                    backward_time, update_time, num_seeds, num_inputs))
        epoch += 1

        toc = time.time()
        print('Epoch Time(s): {:.4f}'.format(toc - tic))
        #if epoch % args.eval_every == 0 and epoch != 0:
        #    eval_acc = evaluate(model, g, g.ndata['features'], g.ndata['labels'], val_nid, args.batch_size, device)
        #    print('Eval Acc {:.4f}'.format(eval_acc))

    profiler.stop()
    print(profiler.output_text(unicode=True, color=True))
    # clean up
    g._client.barrier()
    dgl.distributed.shutdown_servers()
    dgl.distributed.finalize_client()
Code example #47
    def run(self, workers, profile=False):
        if profile:
            try:
                from pyinstrument import Profiler
            except ImportError:
                raise ImportError('You must install pyinstrument to use profiling.')
            profiler = Profiler()
            profiler.start()

        # setup queuer
        rollout_queuer = RolloutQueuerAsync(workers, self.nb_learn_batch, self.rollout_queue_size)
        rollout_queuer.start()

        # initial setup
        global_step_count = self.initial_step_count
        next_save = self.init_next_save(self.initial_step_count, self.epoch_len)
        prev_step_t = time()
        ep_rewards = torch.zeros(self.nb_env)
        start_time = time()

        # loop until total number steps
        print('{} starting training'.format(self.rank))
        while not self.done(global_step_count):
            self.exp.clear()
            # Get batch from queue
            rollouts, terminal_rewards, terminal_infos = rollout_queuer.get()

            # Iterate forward on batch
            self.exp.write_exps(rollouts)
            # keep a copy of terminals on the cpu it's faster
            rollout_terminals = torch.stack(self.exp['terminals']).numpy()
            self.exp.to(self.device)
            r = self.exp.read()
            internals = {k: ts[0].unbind(0) for k, ts in r.internals.items()}
            for obs, rewards, terminals in zip(
                    r.observations,
                    r.rewards,
                    rollout_terminals
            ):
                _, h_exp, internals = self.actor.act(self.network, obs,
                                                     internals)
                self.exp.write_actor(h_exp, no_env=True)

                # where returns a single element tuple with the indexes
                terminal_inds = np.where(terminals)[0]
                for i in terminal_inds:
                    for k, v in self.network.new_internals(self.device).items():
                        internals[k][i] = v

            # compute loss
            loss_dict, metric_dict = self.learner.compute_loss(
                self.network, self.exp.read(), r.next_observation, internals
            )
            total_loss = torch.sum(
                torch.stack(tuple(loss for loss in loss_dict.values()))
            )

            self.optimizer.zero_grad()
            total_loss.backward()
            self.optimizer.step()

            # Perform state updates
            global_step_count += self.nb_env * self.nb_learn_batch * len(r.terminals) * self.nb_learners

            # if rank 0 write summaries and save
            # and send parameters to workers async
            if self.rank == 0:
                # TODO: this could be parallelized, chunk by nb learners
                self.synchronize_worker_parameters(workers, global_step_count)

                # possible save
                if global_step_count >= next_save:
                    self.saver.save_state_dicts(
                        self.network, global_step_count, self.optimizer
                    )
                    next_save += self.epoch_len

                # write reward summaries
                if any(terminal_rewards):
                    terminal_rewards = list(filter(lambda x: x is not None, terminal_rewards))
                    self.summary_writer.add_scalar(
                        'reward', np.mean(terminal_rewards), global_step_count
                    )

                # write infos
                if any(terminal_infos):
                    terminal_infos = list(filter(lambda x: x is not None, terminal_infos))
                    float_keys = [
                        k for k, v in terminal_infos[0].items() if type(v) == float
                    ]
                    terminal_infos_dlist = listd_to_dlist(terminal_infos)
                    for k in float_keys:
                        self.summary_writer.add_scalar(
                            f'info/{k}',
                            np.mean(terminal_infos_dlist[k]),
                            global_step_count
                        )

            # write summaries
            cur_step_t = time()
            if cur_step_t - prev_step_t > self.summary_freq:
                print('Rank {} Metrics:'.format(self.rank), rollout_queuer.metrics())
                if self.rank == 0:
                    self.write_summaries(
                        self.summary_writer, global_step_count, total_loss,
                        loss_dict, metric_dict, self.network.named_parameters()
                    )
                prev_step_t = cur_step_t

        rollout_queuer.close()
        print('{} stopped training'.format(self.rank))
        if profile:
            profiler.stop()
            print(profiler.output_text(unicode=True, color=True))
Code example #48
File: observe.py Project: lpsinger/ztf_sim
def observe(run_name=run_name, start_time='2018-01-01 04:00:00',
            weather_year=None, survey_duration=1 * u.hour):

    if profile:
        profiler = Profiler()
        profiler.start()

    survey_start_time = Time(start_time, scale='utc', location=P48_loc)

    tel = ZTFStateMachine(
        current_time=survey_start_time,
        historical_observability_year=weather_year)

    # set up QueueManager
    Q = GreedyQueueManager()

    # set up Observing Programs
    #CollabOP = CollaborationObservingProgram()
    # Q.add_observing_program(CollabOP)
    MSIPOP = MSIPObservingProgram(
        Q.fields.select_field_ids(dec_range=[-30, 90], grid_id=0))
    MSIPOP.observing_time_fraction = 1.0
    Q.add_observing_program(MSIPOP)
    #CaltechOP = CaltechObservingProgram()
    # Q.add_observing_program(CaltechOP)

    # initialize nightly field requests (Tom Barlow function)
    Q.assign_nightly_requests(tel.current_state_dict())

    # temporary loading to test things
    # Q.rp.add_requests(1,
    #        Q.fields.fields[
    #            Q.fields.select_fields(dec_range=[-30,90])].index, 2,
    #        'no_cadence',{})

    # initialize sqlite history
    log = ObsLogger(run_name, tel.current_time)
    log.create_pointing_log(clobber=True)

    while tel.current_time < (survey_start_time + survey_duration):

        # TODO: reload queue with new requests on update interval (nightly

        if tel.check_if_ready():
            current_state = tel.current_state_dict()
            # get coords
            next_obs = Q.next_obs(current_state)

            # TODO: filter change, if needed

            if not tel.start_slew(coord.SkyCoord(next_obs['target_ra'] * u.deg,
                                                 next_obs['target_dec'] * u.deg)):
                tel.set_cant_observe()
                # TODO: log the failure
                # "missed history": http://ops2.lsst.org/docs/current/architecture.html#output-tables
                log.prev_obs = None
                tel.wait()
                continue
            if not tel.start_exposing():
                tel.set_cant_observe()
                # TODO: log the failure
                log.prev_obs = None
                tel.wait()
                continue
            else:
                # exposure completed successfully.  now
                # a) store exposure information in pointing history sqlite db
                current_state = tel.current_state_dict()
                log.log_pointing(current_state, next_obs)
                # b) update Fields
                Q.fields.mark_field_observed(next_obs, current_state)
                # c) remove completed request_id
                Q.rp.remove_requests(next_obs['request_id'])
        else:
            tel.set_cant_observe()
            tel.wait()

    if profile:
        profiler.stop()
        print profiler.output_text(unicode=True, color=True)
Code example #49
File: profiling.py Project: xadupre/pyquickhelper
def profile(fct,
            sort='cumulative',
            rootrem=None,
            as_df=False,
            pyinst_format=None,
            **kwargs):
    """
    Profiles the execution of a function.

    @param      fct             function to profile
    @param      sort            see `sort_stats <https://docs.python.org/3/library/
                                profile.html#pstats.Stats.sort_stats>`_
    @param      rootrem         root to remove in filenames
    @param      as_df           return the results as a dataframe and not text
    @param      pyinst_format   format for :epkg:`pyinstrument`, if not empty,
                                the function uses this module or raises an exception if not
                                installed, the options are *text*, *textu* (text with colors),
                                *json*, *html*
    @param      kwargs          additional parameters used to create the profiler
    @return                     raw results, statistics text dump (or dataframe if *as_df* is True)

    .. plot::

        import matplotlib.pyplot as plt
        from pyquickhelper.pycode.profiling import profile
        from pyquickhelper.texthelper import compare_module_version

        def fctm():
            return compare_module_version('0.20.4', '0.22.dev0')

        pr, df = profile(lambda: [fctm() for i in range(0, 1000)], as_df=True)
        ax = df[['namefct', 'cum_tall']].head(n=15).set_index(
            'namefct').plot(kind='bar', figsize=(8, 3), rot=30)
        ax.set_title("example of a graph")
        for la in ax.get_xticklabels():
            la.set_horizontalalignment('right');
        plt.show()
    """
    if pyinst_format is None:
        pr = cProfile.Profile(**kwargs)
        pr.enable()
        fct()
        pr.disable()
        s = StringIO()
        ps = pstats.Stats(pr, stream=s).sort_stats(sort)
        ps.print_stats()
        res = s.getvalue()
        try:
            pack = site.getsitepackages()
        except AttributeError:  # pragma: no cover
            import numpy
            pack = os.path.normpath(
                os.path.abspath(
                    os.path.join(os.path.dirname(numpy.__file__), "..")))
            pack = [pack]
        pack_ = os.path.normpath(os.path.join(pack[-1], '..'))

        def clean_text(res):
            res = res.replace(pack[-1], "site-packages")
            res = res.replace(pack_, "lib")
            if rootrem is not None:
                if isinstance(rootrem, str):
                    res = res.replace(rootrem, '')
                else:
                    for sub in rootrem:
                        if isinstance(sub, str):
                            res = res.replace(sub, '')
                        elif isinstance(sub, tuple) and len(sub) == 2:
                            res = res.replace(sub[0], sub[1])
                        else:
                            raise TypeError(
                                "rootrem must contains strings or tuple not {0}"
                                .format(rootrem))
            return res

        if as_df:

            def better_name(row):
                if len(row['fct']) > 15:
                    return "{}-{}".format(row['file'].split(':')[-1],
                                          row['fct'])
                name = row['file'].replace("\\", "/")
                return "{}-{}".format(name.split('/')[-1], row['fct'])

            rows = _process_pstats(ps, clean_text)
            import pandas
            df = pandas.DataFrame(rows)
            df = df[[
                'fct', 'file', 'ncalls1', 'ncalls2', 'tin', 'cum_tin', 'tall',
                'cum_tall'
            ]]
            df['namefct'] = df.apply(lambda row: better_name(row), axis=1)
            df = df.groupby(['namefct', 'file'],
                            as_index=False).sum().sort_values(
                                'cum_tall',
                                ascending=False).reset_index(drop=True)
            return ps, df
        else:
            res = clean_text(res)
            return ps, res
    elif as_df:
        raise ValueError(  # pragma: no cover
            "as_df is not a compatible option with pyinst_format")
    else:
        try:
            from pyinstrument import Profiler
        except ImportError as e:  # pragma: no cover
            raise ImportError("pyinstrument is not installed.") from e

        profiler = Profiler(**kwargs)
        profiler.start()
        fct()
        profiler.stop()

        if pyinst_format == "text":
            return profiler, profiler.output_text(unicode=False, color=False)
        elif pyinst_format == "textu":
            return profiler, profiler.output_text(unicode=True, color=True)
        elif pyinst_format == "json":
            from pyinstrument.renderers import JSONRenderer
            return profiler, profiler.output(JSONRenderer())
        elif pyinst_format == "html":
            return profiler, profiler.output_html()
        else:
            raise ValueError("Unknown format '{}'.".format(pyinst_format))
Code example #50
def main():
    usage = ("usage: pyinstrument [options] scriptfile [arg] ...")
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False

    parser.add_option('', '--setprofile',
        dest='setprofile', action='store_true',
        help='run in setprofile mode, instead of signal mode', default=False)

    parser.add_option('', '--html',
        dest="output_html", action='store_true',
        help="output HTML instead of text", default=False)
    parser.add_option('-o', '--outfile',
        dest="outfile", action='store',
        help="save report to <outfile>", default=None)

    parser.add_option('', '--unicode',
        dest='unicode', action='store_true',
        help='force unicode text output')
    parser.add_option('', '--no-unicode',
        dest='unicode', action='store_false',
        help='force ascii text output')

    parser.add_option('', '--color',
        dest='color', action='store_true',
        help='force ansi color text output')
    parser.add_option('', '--no-color',
        dest='color', action='store_false',
        help='force no color text output')

    if not sys.argv[1:]:
        parser.print_help()
        sys.exit(2)

    (options, args) = parser.parse_args()
    sys.argv[:] = args

    if len(args) > 0:
        progname = args[0]
        sys.path.insert(0, os.path.dirname(progname))

        with open(progname, 'rb') as fp:
            code = compile(fp.read(), progname, 'exec')
        globs = {
            '__file__': progname,
            '__name__': '__main__',
            '__package__': None,
        }

        try:
            profiler = Profiler(use_signal=not options.setprofile)
        except SignalUnavailableError:
            profiler = Profiler(use_signal=False)

        profiler.start()

        try:
            exec_(code, globs, None)
        except IOError as e:
            import errno

            if e.errno == errno.EINTR:
                print(
                    'Failed to run program due to interrupted system call.\n'
                    'This happens because pyinstrument is sending OS signals to the running\n'
                    'process to interrupt it. If your program has long-running syscalls this\n'
                    'can cause a problem.\n'
                    '\n'
                    'You can avoid this error by running in \'setprofile\' mode. Do this by\n'
                    'passing \'--setprofile\' when calling pyinstrument at the command-line.\n'
                    '\n'
                    'For more information, see\n'
                    'https://github.com/joerick/pyinstrument/issues/16\n'
                )

            raise
        except (SystemExit, KeyboardInterrupt):
            pass

        profiler.stop()

        if options.outfile:
            f = codecs.open(options.outfile, 'w', 'utf-8')
        else:
            f = sys.stdout

        unicode_override = options.unicode is not None
        color_override = options.color is not None

        unicode = options.unicode if unicode_override else file_supports_unicode(f)
        color = options.color if color_override else file_supports_color(f)

        if options.output_html:
            f.write(profiler.output_html())
        else:
            f.write(profiler.output_text(unicode=unicode, color=color))

        f.close()
    else:
        parser.print_usage()
    return parser
Code example #51
def profile_ctx(engine='pyinstrument'):
    """
    A context manager which profiles the body of the with statement
    with the supplied profiling engine and returns the profiling object
    in a list.

    Arguments
    ---------
    engine: str
      The profiling engine, e.g. 'pyinstrument' or 'snakeviz' 

    Returns
    -------
    sessions: list
      A list containing the profiling session.
    """
    if engine == 'pyinstrument':
        from pyinstrument import Profiler
        try:
            prof = Profiler()
            prof.start()
        except RuntimeError:
            prof = Profiler(async_mode='disabled')
            prof.start()
    elif engine == 'snakeviz':
        prof = Profile()
        prof.enable()
    elif engine is None:
        pass
    sessions = []
    yield sessions
    if engine == 'pyinstrument':
        sessions.append(prof.stop())
    elif engine == 'snakeviz':
        prof.disable()
        sessions.append(prof)
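
A usage sketch, assuming profile_ctx is decorated with contextlib.contextmanager (implied by the yield) and rendering the captured session with pyinstrument's ConsoleRenderer; the workload line is illustrative:

from pyinstrument.renderers import ConsoleRenderer

with profile_ctx('pyinstrument') as sessions:
    sum(i * i for i in range(100000))  # illustrative workload

# prof.stop() returns a session object, appended to the list on exit
print(ConsoleRenderer(unicode=True, color=True).render(sessions[0]))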
Code example #52
def observe(config_file, profile=False, raise_queue_empty=True):

    if profile:
        try:
            from pyinstrument import Profiler
        except ImportError:
            print "Error importing pyinstrument"
            profile = False

    ztf_config = ZTFConfiguration("../sims/{}".format(config_file))

    # load config parameters into local variables
    run_name = ztf_config.config["run_name"]
    start_time = ztf_config.config["start_time"]
    weather_year = ztf_config.config["weather_year"]
    if weather_year == "None":
        weather_year = None
    survey_duration = ztf_config.config["survey_duration_days"] * u.day
    block_programs = ztf_config.config["block_programs"]
    observing_programs = ztf_config.build_observing_programs()

    if profile:
        if survey_duration > 1.0 * u.day:
            print ("Don't profile long runs: 25% overhead")
            profile = False
        else:
            profiler = Profiler()
            profiler.start()

    survey_start_time = Time(start_time, scale="utc", location=P48_loc)

    tel = ZTFStateMachine(
        current_time=survey_start_time,
        historical_observability_year=weather_year,
        logfile="../sims/{}_log.txt".format(run_name),
    )

    # set up QueueManager
    Q = GreedyQueueManager(block_programs=block_programs)

    for op in observing_programs:
        Q.add_observing_program(op)

    # initialize nightly field requests (Tom Barlow function)
    Q.assign_nightly_requests(tel.current_state_dict())

    # initialize sqlite history
    log = ObsLogger(run_name, tel.current_time)

    current_night_mjd = np.floor(tel.current_time.mjd)

    while tel.current_time < (survey_start_time + survey_duration):

        # check if it is a new night and reload queue with new requests
        if np.floor(tel.current_time.mjd) > current_night_mjd:
            log.prev_obs = None
            Q.assign_nightly_requests(tel.current_state_dict())
            current_night_mjd = np.floor(tel.current_time.mjd)

        if tel.check_if_ready():
            current_state = tel.current_state_dict()
            # get coords
            try:
                next_obs = Q.next_obs(current_state)
                # TODO: debugging check...
                assert next_obs["request_id"] in Q.queue.index
            except QueueEmptyError:
                if not raise_queue_empty:
                    tel.logger.info("Queue empty!  Waiting...")
                    log.prev_obs = None
                    tel.wait()
                    continue
                else:
                    raise QueueEmptyError("Queue is empty")

            # try to change filters, if needed
            if next_obs["target_filter_id"] != current_state["current_filter_id"]:
                if not tel.start_filter_change(next_obs["target_filter_id"]):
                    tel.logger.info("Filter change failure!  Waiting...")
                    log.prev_obs = None
                    tel.wait()
                    continue

            # try to slew to the next target
            if not tel.start_slew(coord.SkyCoord(next_obs["target_ra"] * u.deg, next_obs["target_dec"] * u.deg)):
                tel.set_cant_observe()
                # TODO: log the failure
                # "missed history": http://ops2.lsst.org/docs/current/architecture.html#output-tables
                tel.logger.info(
                    "Failure slewing to {}, {}!  Waiting...".format(
                        next_obs["target_ra"] * u.deg, next_obs["target_dec"] * u.deg
                    )
                )
                log.prev_obs = None
                tel.wait()
                continue

            # try to expose
            if not tel.start_exposing():
                tel.set_cant_observe()
                tel.logger.info("Exposure failure!  Waiting...")
                log.prev_obs = None
                tel.wait()
                continue
            else:
                # exposure completed successfully.  now
                # a) store exposure information in pointing history sqlite db
                current_state = tel.current_state_dict()
                log.log_pointing(current_state, next_obs)
                # b) update Fields
                Q.fields.mark_field_observed(next_obs, current_state)
                # c) remove completed request_id from the pool and the queue
                # TODO: debugging check
                assert next_obs["request_id"] in Q.queue.index
                Q.remove_requests(next_obs["request_id"])
        else:
            log.prev_obs = None
            tel.set_cant_observe()
            tel.wait()

    if profile:
        profiler.stop()
        print profiler.output_text(unicode=True, color=True)
        with open("../sims/{}_profile.txt".format(run_name), "w") as f:
            f.write(profiler.output_text())
Code example #53
    N_LUT = int(np.round(Fs / ref_freq_Hz))
    ref_freq = Fs / N_LUT

    # Arduino C++ style using for-loop
    lut_cos = np.full(N_LUT, np.nan)
    for i in range(N_LUT):
        # -- Cosine
        # N_LUT even: [ 0, 1]
        # N_LUT odd : [>0, 1]
        lut_cos[i] = 0.5 * (1 + np.cos(2 * np.pi * i / N_LUT))
    """

    #  Simulate incoming blocks on the fly
    # -------------------------------------
    if RUN_PYINSTRUMENT:
        profiler = Profiler()
        profiler.start()

    # DEV NOTE: The use of a `numba.njit(nogil=True)`` decorator on numpy
    # functions operating on the relatively small `state` time series will not
    # significantly improve the calculation speed. Simply refrain from using it
    # in the upcoming block of code.

    tick = Time.perf_counter()
    N_sim_blocks = int(len(time) / BLOCK_SIZE)
    for idx_sim_block in range(N_sim_blocks):
        sim_slice = slice(BLOCK_SIZE * idx_sim_block,
                          BLOCK_SIZE * (idx_sim_block + 1))

        # Stage 0
        # -------
Code example #54
File: __main__.py Project: asmeurer/pyinstrument
def main():
    usage = ("usage: pyinstrument [options] scriptfile [arg] ...")
    version_string = 'pyinstrument {v}, on Python {pyv[0]}.{pyv[1]}.{pyv[2]}'.format(
        v=pyinstrument.__version__,
        pyv=sys.version_info,
    )
    parser = optparse.OptionParser(usage=usage, version=version_string)
    parser.allow_interspersed_args = False

    def dash_m_callback(option, opt, value, parser):
        parser.values.module_name = value
        # everything after the -m argument should be passed to that module
        parser.values.module_args = parser.rargs + parser.largs
        parser.rargs[:] = []
        parser.largs[:] = []

    parser.add_option(
        '',
        '--load-prev',
        dest='load_prev',
        action='store',
        metavar='ID',
        help="Instead of running a script, load a previous report")

    parser.add_option(
        '-m',
        '',
        dest='module_name',
        action='callback',
        callback=dash_m_callback,
        type="str",
        help="run library module as a script, like 'python -m module'")

    parser.add_option('-o',
                      '--outfile',
                      dest="outfile",
                      action='store',
                      help="save to <outfile>",
                      default=None)

    parser.add_option(
        '-r',
        '--renderer',
        dest='renderer',
        action='store',
        type='string',
        help=
        ("how the report should be rendered. One of: 'text', 'html', 'json', or python "
         "import path to a renderer class"),
        default='text')

    parser.add_option('',
                      '--html',
                      dest="output_html",
                      action='store_true',
                      help=optparse.SUPPRESS_HELP,
                      default=False)  # deprecated shortcut for --renderer=html

    parser.add_option(
        '-t',
        '--timeline',
        dest='timeline',
        action='store_true',
        help=
        "render as a timeline - preserve ordering and don't condense repeated calls"
    )

    parser.add_option(
        '',
        '--hide',
        dest='hide_fnmatch',
        action='store',
        metavar='EXPR',
        help=
        ("glob-style pattern matching the file paths whose frames to hide. Defaults to "
         "'*{sep}lib{sep}*'.").format(sep=os.sep),
        default='*{sep}lib{sep}*'.format(sep=os.sep))
    parser.add_option(
        '',
        '--hide-regex',
        dest='hide_regex',
        action='store',
        metavar='REGEX',
        help=
        ("regex matching the file paths whose frames to hide. Useful if --hide doesn't give "
         "enough control."))

    parser.add_option('',
                      '--show-all',
                      dest='show_all',
                      action='store_true',
                      help="(text renderer only) show external library code",
                      default=False)

    parser.add_option('',
                      '--unicode',
                      dest='unicode',
                      action='store_true',
                      help='(text renderer only) force unicode text output')
    parser.add_option('',
                      '--no-unicode',
                      dest='unicode',
                      action='store_false',
                      help='(text renderer only) force ascii text output')

    parser.add_option('',
                      '--color',
                      dest='color',
                      action='store_true',
                      help='(text renderer only) force ansi color text output')
    parser.add_option('',
                      '--no-color',
                      dest='color',
                      action='store_false',
                      help='(text renderer only) force no color text output')

    if not sys.argv[1:]:
        parser.print_help()
        sys.exit(2)

    options, args = parser.parse_args()

    if not options.hide_regex:
        options.hide_regex = fnmatch.translate(options.hide_fnmatch)

    if args == [] and options.module_name is None and options.load_prev is None:
        parser.print_help()
        sys.exit(2)

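    # Either load a previously saved report, or run the target code under the
    # profiler to produce a fresh session.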
    if options.load_prev:
        session = load_report(options.load_prev)
    else:
        if options.module_name is not None:
            sys.argv[:] = [options.module_name] + options.module_args
            code = "run_module(modname, run_name='__main__')"
            globs = {
                'run_module': runpy.run_module,
                'modname': options.module_name
            }
        else:
            sys.argv[:] = args
            progname = args[0]
            sys.path.insert(0, os.path.dirname(progname))
            with open(progname, 'rb') as fp:
                code = compile(fp.read(), progname, 'exec')
            globs = {
                '__file__': progname,
                '__name__': '__main__',
                '__package__': None,
            }

        profiler = Profiler()

        profiler.start()

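        # Run the target; a SystemExit or KeyboardInterrupt ends the run
        # early, but the samples collected so far are still rendered.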
        try:
            exec_(code, globs, None)
        except (SystemExit, KeyboardInterrupt):
            pass

        profiler.stop()
        session = profiler.last_session

    if options.output_html:
        options.renderer = 'html'

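    # Rendering HTML straight to a terminal isn't readable, so when no
    # outfile was given, write to a temporary file and open a browser below.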
    output_to_temp_file = (options.renderer == 'html' and not options.outfile
                           and file_is_a_tty(sys.stdout))

    if options.outfile:
        f = codecs.open(options.outfile, 'w', 'utf-8')
        should_close_f_after_writing = True
    elif output_to_temp_file:
        output_file = tempfile.NamedTemporaryFile(suffix='.html', delete=False)
        f = codecs.getwriter('utf-8')(output_file)
        output_filename = output_file.name
        should_close_f_after_writing = True
    else:
        if PY2:
            f = codecs.getwriter('utf-8')(sys.stdout)
        else:
            f = sys.stdout
        should_close_f_after_writing = False

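    # Collect renderer options from the command-line flags.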
    renderer_kwargs = {'processor_options': {'hide_regex': options.hide_regex}}

    if options.timeline is not None:
        renderer_kwargs['timeline'] = options.timeline

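    # Explicit --unicode/--color flags win; otherwise auto-detect what the
    # output stream supports.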
    if options.renderer == 'text':
        unicode_override = options.unicode is not None
        color_override = options.color is not None
        unicode = options.unicode if unicode_override else file_supports_unicode(f)
        color = options.color if color_override else file_supports_color(f)

        renderer_kwargs.update({
            'unicode': unicode,
            'color': color,
            'show_all': options.show_all
        })

    renderer_class = get_renderer_class(options.renderer)
    renderer = renderer_class(**renderer_kwargs)

    # remove this frame from the trace
    renderer.processors.append(remove_first_pyinstrument_frame_processor)

    f.write(renderer.render(session))
    if should_close_f_after_writing:
        f.close()

    if output_to_temp_file:
        print('stdout is a terminal, so saved profile output to %s' %
              output_filename)
        import webbrowser
        from pyinstrument.vendor.six.moves import urllib
        url = urllib.parse.urlunparse(
            ('file', '', output_filename, '', '', ''))
        webbrowser.open(url)

    if options.renderer == 'text':
        _, report_identifier = save_report(session)
        print('To view this report with different options, run:')
        print('    pyinstrument --load-prev %s [options]' % report_identifier)
        print('')
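
Stripped of option handling, the flow of main() above is: start a Profiler, execute the target, stop, then hand the resulting session to a renderer. The following is a minimal sketch of that core, assuming only the APIs visible in these examples (Profiler.start()/stop(), last_session, renderer.render(session), and ConsoleRenderer as the text renderer); profile_and_render is a hypothetical helper, not part of pyinstrument.

from pyinstrument import Profiler, renderers

def profile_and_render(fn, *args, **kwargs):
    # Sample fn while it runs, then render the session as a text report.
    profiler = Profiler()
    profiler.start()
    try:
        fn(*args, **kwargs)
    except (SystemExit, KeyboardInterrupt):
        pass  # mirror main(): an interrupted run is still rendered
    finally:
        profiler.stop()
    renderer = renderers.ConsoleRenderer()
    return renderer.render(profiler.last_session)

For example, after import time, print(profile_and_render(time.sleep, 0.1)) prints roughly the same text report the CLI emits by default.
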
Code example #55
0
File: test_profiler.py Project: ziux/pyinstrument
def test_empty_profile():
    with Profiler() as profiler:
        pass
    profiler.output(renderer=renderers.ConsoleRenderer())
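
The test above only checks that rendering an empty profile doesn't crash. For contrast, here is a hypothetical companion test (not from the project) that exercises a non-empty profile through the same context-manager API; it assumes the rendered text includes the names of sampled functions.

import time

from pyinstrument import Profiler, renderers

def test_sleep_profile():
    # pyinstrument samples wall-clock time, so a sleep should dominate
    # the report.
    with Profiler() as profiler:
        time.sleep(0.1)
    output = profiler.output(renderer=renderers.ConsoleRenderer())
    assert 'sleep' in output  # assumes frame names appear in the text output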