Example 1
def test_get_symmetries():
    """Check that the function correctly creates six new reflected boards and
    policy vectors, one for each axis of symmetry.
    """
    game = BloomsGame(size=4, score_target=15)
    board = game.getInitBoard()

    # Place stones
    board.place_stone(position=(3, 1), colour=1)
    board.place_stone(position=(5, 1), colour=2)
    board.place_stone(position=(3, 5), colour=3)
    board.place_stone(position=(1, 5), colour=4)

    # board.visualise(show_coords=True, title="Original")

    # Create a dummy policy vector
    pi = np.random.random_sample(game.getActionSize())

    profiler = Profiler()
    profiler.start()

    symmetrical_states = game.getSymmetries(board, pi)

    profiler.stop()
    print(profiler.output_text(unicode=True, color=True))

    assert len(symmetrical_states) == 24
    assert symmetrical_states[1][1][37] == pi[52]
    assert symmetrical_states[1][1][614] == pi[1277]

    assert all(
        [sum(x[1]) == pytest.approx(sum(pi)) for x in symmetrical_states])
Example 2
    def forward(self, x):
        profiler = Profiler()
        profiler.start()
        if x.shape[-1] > 3:
            pos = x[:, :, :3]
            feat = x[:, :, 3:]
        else:
            pos = x
            feat = None
        pos, feat = self.sa_msg_module1(pos, feat)
        pos, feat = self.sa_msg_module2(pos, feat)
        h = self.sa_module3(pos, feat)

        h = self.mlp1(h)
        h = self.bn1(h)
        h = F.relu(h)
        h = self.drop1(h)
        h = self.mlp2(h)
        h = self.bn2(h)
        h = F.relu(h)
        h = self.drop2(h)

        out = self.mlp_out(h)
        profiler.stop()

        print(profiler.output_text(unicode=True, color=True))
        return out
Example 3
class ManagerProfilePyinstrument(ManagerProfile):
    def __init__(self, sync: bool):
        self.sync = sync
        self.profiler = Profiler()

    def start(self):
        self.profiler.start()
        return nullcontext()

    def stop_and_write(self,
                       path_profile: str,
                       is_docker: bool,
                       api: str,
                       render_browser: bool = False):
        self.profiler.stop()
        mode = "sync" if self.sync else "async"
        filename = f"pyinstrument_profile_{mode}_{api}.html"
        if not is_docker:
            output_html = self.profiler.output_html()
            self._write_output_file(path_profile,
                                    output_html,
                                    filename=filename)

        if render_browser:
            self.profiler.open_in_browser()

        print(self.profiler.output_text(unicode=True, color=True))

    def _write_output_file(self, path_profile: str, output_html: str,
                           filename: str):
        output_html_path = self._prepare_output_path(path_profile, filename)
        with open(output_html_path, "w") as file:
            file.write(output_html)
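A minimal usage sketch for the manager above; the profiled call, the output directory, and the `api` label are placeholders, and the `_prepare_output_path` helper is assumed to come from the `ManagerProfile` base class.

manager = ManagerProfilePyinstrument(sync=True)
with manager.start():
    run_workload()  # hypothetical function being profiled
manager.stop_and_write(path_profile="/tmp/profiles", is_docker=False, api="query")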
Example 4
 def wrapper(*args, **kwargs):
     profiler = Profiler()
     profiler.start()
     r = func(*args, **kwargs)
     profiler.stop()
     print(profiler.output_text(color=True))
     return r
Example 5
class BenchmarkTomo(unittest.TestCase):
    """Run benchmarks for tomography reconstruction."""
    def setUp(self):
        """Create a test dataset."""
        self.profiler = Profiler()
        dataset_file = '../tests/data/tomo_setup.pickle.lzma'
        with lzma.open(dataset_file, 'rb') as file:
            [
                self.data,
                self.theta,
                self.original,
            ] = pickle.load(file)

    @unittest.skip('Demonstrate skipped tests.')
    def test_never(self):
        """Never run this test."""
        pass

    def test_cgrad(self):
        """Use pyinstrument to benchmark tomo.grad on one core."""
        logging.disable(logging.WARNING)
        result = {'obj': np.zeros(self.original.shape, dtype=np.complex64)}
        self.profiler.start()
        for i in range(50):
            result = tike.tomo.reconstruct(
                **result,
                theta=self.theta,
                integrals=self.data,
                algorithm='cgrad',
                num_iter=1,
            )
        self.profiler.stop()
        print('\n')
        print(self.profiler.output_text(unicode=True, color=True))
Example 6
File: debug.py Project: wmv/inbox
 def wrapper(*args, **kwargs):
     profiler = Profiler()
     profiler.start()
     r = func(*args, **kwargs)
     profiler.stop()
     print(profiler.output_text(color=True))
     return r
Example 7
    def _report_from_filters(
        self, n_clicks, dd_name, input_name, *filter_values
    ):

        if n_clicks == 0:
            raise ValueError("no true report request, only toggle")

        stime = time.time()
        name = input_name
        for elem in dash.callback_context.triggered:
            if ElemIds.is_dd(elem["prop_id"]):
                name = dd_name

        logger.info("start filtering")
        if self._timing:
            logger.info("starting time profiling")
            profiler = Profiler()
            profiler.start()

        filtered_ddf = self._filter_ddf(filter_values)

        logger.info("calculating elements")
        elems = self.get_report_elems(filtered_ddf)
        logger.info(f"{len(elems)} elements returned")

        logger.info("parsing elements to output values")
        outputs = self._parse_report_elems_to_outputs(elems, name)
        logger.info(f"{len(outputs)} outputs parsed")
        logger.info("report done", runtime=round(time.time() - stime, 3))

        if self._timing:
            profiler.stop()
            log_profiler(profiler, logger, 20)

        return (name, name, *outputs)
Example 8
def instrument(
    cls_runner: tp.Type[Perf],
    pattern_func: str,
    timeline: bool = False,
) -> None:
    '''
    Profile the `sf` function from the supplied class.
    '''
    runner = cls_runner()
    for name in runner.iter_function_names(pattern_func):
        f = getattr(runner, name)
        profiler = Profiler(interval=0.0001)  # default is 0.001, 1 ms

        if timeline:
            profiler.start()
            f()
            profiler.stop()
        else:
            profiler.start()
            for _ in range(runner.NUMBER):
                f()
            profiler.stop()

        print(
            profiler.output_text(unicode=True,
                                 color=True,
                                 timeline=timeline,
                                 show_all=True))
Example 9
def main(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')

    # creating random file for each usage of this function
    f = open("/home/" + str(random.randint(1, 1100)) + ".html", "w+")
    profiler = Profiler(use_signal=False)
    profiler.start()

    n = int(req.params.get('n', 15))

    if not n:
        try:
            req_body = req.get_json()
        except ValueError:
            pass
        else:
            n = req_body.get('n')

    if n:
        output = str(fibnonci_approach(n))

        # stopping profiler and writing it to random file generated earlier
        profiler.stop()
        f.write(profiler.output_html())

        return func.HttpResponse("Fibnonci of " + str(n) + " using regular approach:" + output)
    else:
        return func.HttpResponse(
             "Please pass a name on the query string or in the request body",
             status_code=400
        )
Example 10
@contextmanager
def profile_ctx(engine='pyinstrument'):
    """
    A context manager which profiles the body of the with statement
    with the supplied profiling engine and returns the profiling object
    in a list.

    Arguments
    ---------
    engine: str
      The profiling engine, e.g. 'pyinstrument' or 'snakeviz' 

    Returns
    -------
    sessions: list
      A list containing the profiling session.
    """
    if engine == 'pyinstrument':
        from pyinstrument import Profiler
        prof = Profiler()
        prof.start()
    elif engine == 'snakeviz':
        from cProfile import Profile
        prof = Profile()
        prof.enable()
    elif engine is None:
        pass
    sessions = []
    yield sessions
    if engine == 'pyinstrument':
        sessions.append(prof.stop())
    elif engine == 'snakeviz':
        prof.disable()
        sessions.append(prof)
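A usage sketch for profile_ctx; the profiled loop is illustrative. With the 'pyinstrument' engine, the object returned by prof.stop() is appended to the yielded list on exit.

with profile_ctx('pyinstrument') as sessions:
    total = sum(i * i for i in range(1_000_000))  # code under profile

session = sessions[0]  # profiling session collected when the block exits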
Example 11
class ProfilerMiddleware(object):
    def __init__(self):
        self.profiler = None

    def process_request(self, request):
        if getattr(settings, 'PYINSTRUMENT_URL_ARGUMENT',
                   'profile') in request.GET:
            self.profiler = Profiler()
            try:
                self.profiler.start()
            except NotMainThreadError:
                self.profiler = None
                raise NotMainThreadError(not_main_thread_message)

    def process_response(self, request, response):
        if self.profiler:
            try:
                self.profiler.stop()

                return HttpResponse(self.profiler.output_html())
            except NotMainThreadError:
                raise NotMainThreadError(not_main_thread_message)
            finally:
                self.profiler = None
        else:
            return response
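A sketch of how a middleware like this might be wired up in an old-style Django project; the dotted module path is an assumption, and PYINSTRUMENT_URL_ARGUMENT is optional (it defaults to 'profile').

# settings.py (illustrative)
MIDDLEWARE_CLASSES = [
    # ... other middleware ...
    'myproject.middleware.ProfilerMiddleware',  # assumed module path
]
PYINSTRUMENT_URL_ARGUMENT = 'profile'  # request /some/view/?profile to get the HTML report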
Example 12
    def forward(self, x):
        profiler = Profiler()
        profiler.start()

        x = normalization(x)
        if x.shape[-1] > 3:
            pos = x[:, :, :3]
            feat = x[:, :, 3:]
        else:
            pos = x
            feat = None
        index_voxels, context_points, mask = self.selfvoxels(pos)
        pos, feat = self.sa_module1(pos, feat, index_voxels, context_points,
                                    mask)
        index_voxels, context_points, mask = self.selfvoxels(pos)
        pos, feat = self.sa_module2(pos, feat, index_voxels, context_points,
                                    mask)
        index_voxels, context_points, mask = self.selfvoxels(pos)
        h = self.sa_module3(pos, feat, index_voxels, context_points, mask)

        h = self.mlp1(h)
        h = self.bn1(h)
        h = F.relu(h)
        h = self.drop1(h)
        h = self.mlp2(h)
        h = self.bn2(h)
        h = F.relu(h)
        h = self.drop2(h)

        out = self.mlp_out(h)
        profiler.stop()

        print(profiler.output_text(unicode=True, color=True))
        return out
Example 13
class ProfilerMiddleware(object):
    def __init__(self):
        self.profiler = None

    def process_request(self, request):
        if getattr(settings, 'PYINSTRUMENT_URL_ARGUMENT', 'profile') in request.GET:
            self.profiler = Profiler()
            try:
                self.profiler.start()
            except NotMainThreadError:
                self.profiler = None
                raise NotMainThreadError(not_main_thread_message)


    def process_response(self, request, response):
        if self.profiler:
            try:
                self.profiler.stop()

                return HttpResponse(self.profiler.output_html())
            except NotMainThreadError:
                raise NotMainThreadError(not_main_thread_message)
            finally:
                self.profiler = None
        else:
            return response
Example 14
@contextmanager
def profile(output_format='html', output_filename=None, html_open=False):
    """Profiles a block of code with Pyinstrument.

    Intended to be used in a `with` statement.

    Parameters
    ----------
    output_format : {'html', 'text'}, optional
        Shows the result either as text or in an HTML file. If :code:`output_format = 'html'`,
        the file is saved according to the :code:`output_filename` parameter.
        The default is 'html'.
    output_filename : str, optional
        Only taken into account if :code:`output_format = 'html'`.
        If not given (default), the html output is saved to the same directory the caller resides.
        The name of the html file is the same as that of the caller.
    html_open : bool, optional
        Only taken into account if :code:`output_format = 'html'`.
        If True, the generated HTML file is opened in the default browser.
        The default is False.

    Yields
    ------

    Notes
    -----
    In the implementation, we move two levels up in the stack frame, one for exiting the context
    manager and one for exiting this generator. This assumes that :meth:`profile` was called as
    a context manager. As a good practice, provide the :code:`output_filename` input argument.

    Examples
    --------
    Measure the time needed to generate 1 million uniformly distributed random numbers.

    >>> import random
    >>> with profile('html') as p:
    ...    for _ in range(1000000):
    ...        rand_num = random.uniform(1, 2.2)

    """
    # If required, guess the path where the results will be saved
    if output_format == 'html' and not output_filename:
        caller = inspect.currentframe().f_back.f_back.f_locals['__file__']
        output_filename = os.path.splitext(caller)[0] + '.html'
    # Start the profiler
    profiler = Profiler()
    profiler.start()
    # Give back the execution to the caller function
    yield
    # Finish profiling and show the results
    profiler.stop()
    if output_format == 'html':
        if html_open:
            HTMLRenderer().open_in_browser(profiler.last_session,
                                           output_filename=output_filename)
        else:
            with codecs.open(output_filename, 'w', 'utf-8') as f:
                f.write(HTMLRenderer().render(profiler.last_session))
    elif output_format == 'text':
        print(profiler.output_text(unicode=True, color=True, show_all=True))
Example 15
 def wrapper(*args, **kwargs):
     from pyinstrument import Profiler
     profiler = Profiler()
     profiler.start()
     result = func(*args, **kwargs)
     profiler.stop()
     print(profiler.output_text(unicode=True, color=True))
     return result
Example 16
    def process_request(self, request):
        profile_dir = getattr(settings, 'PYINSTRUMENT_PROFILE_DIR', None)

        if getattr(settings, 'PYINSTRUMENT_URL_ARGUMENT', 'profile') in request.GET or profile_dir:
            profiler = Profiler()
            profiler.start()

            request.profiler = profiler
Example 17
    def train_models(
            self,
            partitions_path,
            graphs_path,
            init_lr=1e-4,
            train_batch_size=32,
            valid_batch_size=32,
            epoch_batch=5,
            epochs=100,
            checkpoint_path="checkpoint/aux_30_epoch_",  # This version only looks at race + age (not gender)
    ):
        with open(partitions_path, "rb") as fp:  # Unpickling
            set_partitions = pickle.load(fp)
        # Training Auxiliary Models!
        for i in range(0, int(epochs / epoch_batch)):
            data_generator_aux = UtkFaceDataGeneratorAuxOneModel(
                self.dataset_folder_name,
                set_partitions[i],
                self.dataset_dict,
                num_classes=self.num_classes)
            aux_train_idx, aux_valid_idx, _ = data_generator_aux.generate_split_indexes(
            )

            train_images_collection, train_status_collection = data_generator_aux.pre_generate_images(
                aux_train_idx, batch_size=train_batch_size)

            valid_images_collection, valid_status_collection = data_generator_aux.pre_generate_images(
                aux_valid_idx, batch_size=valid_batch_size)
            aux_train_gen = data_generator_aux.generate_images(
                is_training=True,
                images_collection=train_images_collection,
                status_collection=train_status_collection)
            aux_valid_gen = data_generator_aux.generate_images(
                is_training=True,
                images_collection=valid_images_collection,
                status_collection=valid_status_collection)

            aux_model = self.build_model()
            es = EarlyStopping(monitor='val_loss', mode='min', patience=10)
            profiler = Profiler()
            profiler.start()
            history = aux_model.fit(
                aux_train_gen,
                steps_per_epoch=len(aux_train_idx) // train_batch_size,
                epochs=(i + 1) * 5,
                validation_data=aux_valid_gen,
                validation_steps=len(aux_valid_idx) // valid_batch_size,
                callbacks=[es])
            profiler.stop()
            profiler.print(show_all=True)

            aux_model.save(str(checkpoint_path) + "_" + str((i + 1) * 5))
            y = history.history['val_loss']
            plt.plot([i for i in range(len(y))], history.history['val_loss'])
            plt.title("Auxiliary Model Validation Loss - {} Epochs".format(
                (i + 1) * 5))
            plt.savefig(graphs_path / "aux_30_epoch_val_loss_{}".format(
                (i + 1) * 5))
Example 18
    def train_model(self,
                    init_lr=1e-4,
                    train_batch_size=16,
                    valid_batch_size=16,
                    epoch_batch=5,
                    epochs=100,
                    checkpoint_dir="checkpoint/base_epochs_"):
        opt = Adam(lr=init_lr, decay=init_lr / epochs)
        self.model.compile(optimizer=opt,
                           loss={
                               'age_output': 'mse',
                               'race_output': 'categorical_crossentropy',
                               'gender_output': 'binary_crossentropy'
                           },
                           loss_weights={
                               'age_output': 4.,
                               'race_output': 1.5,
                               'gender_output': 0.1
                           },
                           metrics={
                               'age_output': 'mae',
                               'race_output': 'accuracy',
                               'gender_output': 'accuracy'
                           })

        for i in range(int(epochs / epoch_batch)):
            current_checkpoint = checkpoint_dir / "base_epochs_{}".format(
                str((i + 1) * epoch_batch))
            if i != 0:
                self.model = load_model(
                    checkpoint_dir /
                    "base_epochs_{}".format(str((i) * epoch_batch)))
            print(len(self.train_idx), len(self.valid_idx))
            train_images_collection, train_status_collection = self.data_generator.pre_generate_images(
                self.train_idx, batch_size=train_batch_size)

            valid_images_collection, valid_status_collection = self.data_generator.pre_generate_images(
                self.valid_idx, batch_size=valid_batch_size)
            train_gen = self.data_generator.generate_images(
                is_training=True,
                images_collection=train_images_collection,
                status_collection=train_status_collection)
            valid_gen = self.data_generator.generate_images(
                is_training=True,
                images_collection=valid_images_collection,
                status_collection=valid_status_collection)
            profiler = Profiler()
            profiler.start()
            history = self.model.fit_generator(
                train_gen,
                steps_per_epoch=len(self.train_idx) // train_batch_size,
                # epochs=epoch_batch,
                epochs=1,
                validation_data=valid_gen,
                validation_steps=len(self.valid_idx) // valid_batch_size)
            profiler.stop()
            profiler.print(show_all=True)
Example 19
def bench_publish(endpoint, appkey, channel, size, profile, ack=True):
    publisher = satori.rtm.connection.Connection(endpoint + '?appkey=' +
                                                 appkey)
    publisher.start()

    message = binascii.hexlify(os.urandom(size // 2)).decode('ascii')
    print('Message size is {}'.format(len(message)))

    last_usage = [resource.getrusage(resource.RUSAGE_SELF)]
    print(
        'Duration, s\tRate, msgs/s\tMax RSS, MB\tUser time, s\tSystem time, s')

    def report(duration, count):
        usage = resource.getrusage(resource.RUSAGE_SELF)
        maxrss = usage.ru_maxrss // 1024
        if sys.platform == 'darwin':
            maxrss = maxrss // 1024
        print('{0:2.2f}\t\t{1}\t\t{2}\t\t{3:2.2f}\t\t{4:2.2f}'.format(
            duration, int(count / duration), maxrss,
            usage.ru_utime - last_usage[0].ru_utime,
            usage.ru_stime - last_usage[0].ru_stime))
        sys.stdout.flush()
        last_usage[0] = usage

    count = [0]

    def publish_without_ack():
        publisher.publish(channel, message)
        count[0] += 1

    def publish_with_ack():
        def callback(ack):
            count[0] += 1

        publisher.publish(channel, message, callback)

    publish = publish_with_ack if ack else publish_without_ack

    before = time.time()
    try:
        if profile:
            profiler = Profiler()
            profiler.start()
        while True:
            now = time.time()
            if now - before >= sampling_interval:
                report(now - before, count[0])
                if profile:
                    profiler.stop()
                    print(profiler.output_text(unicode=True, color=True))
                    profiler = Profiler()
                    profiler.start()
                count[0] = 0
                before = time.time()
            publish()
    except KeyboardInterrupt:
        sys.exit(0)
Example 20
    def process_request(self, request):
        profile_dir = getattr(settings, 'PYINSTRUMENT_PROFILE_DIR', None)

        if getattr(settings, 'PYINSTRUMENT_URL_ARGUMENT',
                   'profile') in request.GET or profile_dir:
            profiler = Profiler()
            profiler.start()

            request.profiler = profiler
Example 21
def deep_profiler_session():
    profiler = Profiler()
    profiler.start()

    # give 120 frames for pyinstrument to do its work.
    recursion_depth = sys.getrecursionlimit() - current_stack_depth() - 120
    recurse(recursion_depth)

    profiler.stop()
    return profiler.last_session
Example 22
def test_profiler_doesnt_overflow_on_large_call_stacks():
    profiler = Profiler()
    profiler.start()

    # give 170 frames of leeway for the test runner and for pyinstrument to do its work.
    recursion_depth = sys.getrecursionlimit() - 170
    recurse(recursion_depth)

    profiler.stop()
    print(profiler.output_text())
Example 23
def test_profiler_doesnt_overflow_on_large_call_stacks():
    profiler = Profiler()
    profiler.start()

    # give ourselves 150 frames of leeway to do our work.
    recursion_depth = sys.getrecursionlimit() - 150
    recurse(recursion_depth)

    profiler.stop()
    print(profiler.output_text())
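The recurse and current_stack_depth helpers used in the three recursion examples above are not shown; minimal sketches consistent with how they are called might be:

import inspect

def current_stack_depth():
    # Number of frames currently on the interpreter stack.
    return len(inspect.stack())

def recurse(depth):
    # Recurse `depth` times so the profiler has a deep call stack to sample.
    if depth <= 0:
        return
    recurse(depth - 1)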
Example 24
 def pytinstrument_decorator(*args, **kwargs):
     # Create profiler instance before function is called.
     profiler = Profiler()
     profiler.start()
     # Call function.
     func_return = func(*args, **kwargs)
     # Stop profiler and print results after function is called.
     profiler.stop()
     print(profiler.output_text(unicode=False, color=True))
     return func_return
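The snippets above show only the inner wrapper; a complete decorator built around the same pattern might look like this (the name `profiled` is illustrative):

import functools
from pyinstrument import Profiler

def profiled(func):
    """Print a pyinstrument report each time the decorated function is called."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        profiler = Profiler()
        profiler.start()
        try:
            return func(*args, **kwargs)
        finally:
            profiler.stop()
            print(profiler.output_text(unicode=True, color=True))
    return wrapper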
Example 25
def profile():
    profiler = Profiler()
    profiler.start()

    exit_code = runtests.run_tests_all(parallel=False)

    profiler.stop()

    print(profiler.output_text(unicode=False, color=True))
    return exit_code
Example 26
def main():
    usage = "usage: %prog [-h] [-o output_file_path] scriptfile [arg] ..."
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False
    parser.add_option('', '--html',
        dest="output_html", action='store_true',
        help="output HTML instead of text", default=False)
    parser.add_option('-o', '--outfile',
        dest="outfile", action='store', 
        help="save stats to <outfile>", default=None)

    if not sys.argv[1:]:
        parser.print_usage()
        sys.exit(2)

    (options, args) = parser.parse_args()
    sys.argv[:] = args

    if len(args) > 0:
        progname = args[0]
        sys.path.insert(0, os.path.dirname(progname))

        with open(progname, 'rb') as fp:
            code = compile(fp.read(), progname, 'exec')
        globs = {
            '__file__': progname,
            '__name__': '__main__',
            '__package__': None,
        }

        profiler = Profiler()
        profiler.start()

        try:
            exec(code, globs, None)
        except (SystemExit, KeyboardInterrupt):
            pass

        profiler.stop()

        if options.outfile:
            f = codecs.open(options.outfile, 'w', 'utf-8')
            unicode = True
            color = False
        else:
            f = sys.stdout
            unicode = stdout_supports_unicode()
            color = stdout_supports_color()

        if options.output_html:
            f.write(profiler.output_html())
        else:
            f.write(profiler.output_text(unicode=unicode, color=color))

        f.close()
Example 27
 def wrapper(*args, **kwargs):
     profiler = None
     if CONFIG.profiler_enable:
         profiler = Profiler()
         profiler.start()
     try:
         return f(*args, **kwargs)
     finally:
         if profiler is not None:
             profiler.stop()
             print(profiler.output_text(unicode=True, color=True))
Example 28
def attach_profiler():
    profiler = Profiler()
    profiler.start()

    def handle_signal(signum, frame):
        print(profiler.output_text(color=True))
        # Work around an arguable bug in pyinstrument in which output gets
        # frozen after the first call to profiler.output_text()
        delattr(profiler, "_root_frame")

    signal.signal(signal.SIGTRAP, handle_signal)
Example 29
def main():
    profiler = Profiler()

    sim = Simulation()
    sim.setup()

    profiler.start()
    for i in range(100):
        sim.update()
    profiler.stop()
    print(profiler.output_text(unicode=True, color=True))
Example 30
    def process_request(self, request):
        profile_dir = getattr(settings, 'PYINSTRUMENT_PROFILE_DIR', None)
        use_signal = getattr(settings, 'PYINSTRUMENT_USE_SIGNAL', True)

        if getattr(settings, 'PYINSTRUMENT_URL_ARGUMENT', 'profile') in request.GET or profile_dir:
            profiler = Profiler(use_signal=use_signal)
            try:
                profiler.start()
                request.profiler = profiler
            except NotMainThreadError:
                raise NotMainThreadError(not_main_thread_message)
Example 31
File: debug.py Project: wmv/inbox
def attach_profiler():
    profiler = Profiler()
    profiler.start()

    def handle_signal(signum, frame):
        print(profiler.output_text(color=True))
        # Work around an arguable bug in pyinstrument in which output gets
        # frozen after the first call to profiler.output_text()
        delattr(profiler, '_root_frame')

    signal.signal(signal.SIGTRAP, handle_signal)
Example 32
@contextmanager
def instrument(filename="results"):
    profiler = Profiler()
    directory = Path("profiling/results/")
    try:
        profiler.start()
        yield
    finally:
        profiler.stop()
        directory.mkdir(parents=True, exist_ok=True)
        path = os.path.join(directory, f"{filename}.html")
        with open(path, "w") as fs:
            fs.write(profiler.output_html(timeline=True))
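An illustrative use of the instrument context manager above; the profiled expression is a placeholder.

with instrument(filename="matrix_build"):
    [[i * j for j in range(500)] for i in range(500)]
# -> writes profiling/results/matrix_build.html with a timeline report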
Example 33
def profiler_session(session: str) -> None:
    "invoke code with sampling profiler"
    has_profiler = profiler_enable()
    if has_profiler:
        profiler = Profiler(interval=profiler_interval())
        profiler.start()
    try:
        yield
    finally:
        if has_profiler:
            profiler.stop()
            profiler_produce_report(profiler, session)
Example 34
    def decorated_view(*args, **kwargs):

        profiler = Profiler()
        profiler.start()

        result = func(*args, **kwargs)

        profiler.stop()

        print(profiler.output_text(unicode=True, color=True))

        return result
Example 35
    def process_request(self, request):
        profile_dir = getattr(settings, 'PYINSTRUMENT_PROFILE_DIR', None)
        use_signal = getattr(settings, 'PYINSTRUMENT_USE_SIGNAL', True)

        if getattr(settings, 'PYINSTRUMENT_URL_ARGUMENT',
                   'profile') in request.GET or profile_dir:
            profiler = Profiler(use_signal=use_signal)
            try:
                profiler.start()
                request.profiler = profiler
            except NotMainThreadError:
                raise NotMainThreadError(not_main_thread_message)
Example 36
def attach_pyinstrument_profiler():
    """Run the pyinstrument profiler in the background and dump its output to
    stdout when the process receives SIGTRAP. In general, you probably want to
    use the facilities in inbox.util.profiling instead."""
    profiler = Profiler()
    profiler.start()

    def handle_signal(signum, frame):
        print(profiler.output_text(color=True))
        # Work around an arguable bug in pyinstrument in which output gets
        # frozen after the first call to profiler.output_text()
        delattr(profiler, '_root_frame')

    signal.signal(signal.SIGTRAP, handle_signal)
Example 37
def instrument(cls: tp.Type[PerfTest], function: str = 'sf') -> None:
    '''
    Profile the `sf` function from the supplied class.
    '''

    f = getattr(cls, function)
    profiler = Profiler()

    profiler.start()
    for _ in range(cls.NUMBER):
        f()
    profiler.stop()

    print(profiler.output_text(unicode=True, color=True))
Example 38
def test_profiler_retains_multiple_calls():
    profiler = Profiler()
    profiler.start()

    long_function_a()
    long_function_b()
    long_function_a()
    long_function_b()

    profiler.stop()

    print(profiler.output_text())

    frame = profiler.last_session.root_frame()
    assert frame.function == 'test_profiler_retains_multiple_calls'
    assert len(frame.children) == 4
Example 39
def main():
    usage = "usage: %prog [-h] [[-o output_file_path] scriptfile [arg] ...] | [ -i infile ]"
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False
    parser.add_option('', '--html',
        dest="output_html", action='store_true',
        help="output HTML instead of text", default=False)
    parser.add_option('', '--json',
        dest="output_json", action='store_true',
        help="output raw JSON dump instead of text or HTML", default=False)
    parser.add_option('-o', '--outfile',
        dest="outfile", action='store', 
        help="save stats to <outfile>", default=None)
    parser.add_option('-i', '--infile',
        dest="infile", action='store', 
        help="load stats from JSON file <infile>", default=None)

    if not sys.argv[1:]:
        parser.print_usage()
        sys.exit(2)

    (options, args) = parser.parse_args()
    sys.argv[:] = args

    if len(args) > 0:
        progname = args[0]
        sys.path.insert(0, os.path.dirname(progname))

        with open(progname, 'rb') as fp:
            code = compile(fp.read(), progname, 'exec')
        globs = {
            '__file__': progname,
            '__name__': '__main__',
            '__package__': None,
        }

        profiler = Profiler()
        profiler.start()

        try:
            exec(code, globs, None)
        except (SystemExit, KeyboardInterrupt):
            pass

        profiler.stop()

        write_output(options, profiler)
Example 40
    def process_request(self, request):
        profile_dir = getattr(settings, 'PYINSTRUMENT_PROFILE_DIR', None)
        use_signal = getattr(settings, 'PYINSTRUMENT_USE_SIGNAL', True)
        collect_args = getattr(settings, 'PYINSTRUMENT_COLLECT_ARGS', False)

        profiler = None
        if getattr(settings, 'PYINSTRUMENT_URL_COLLECT_ARGS_ARGUMENT', 'profile_collect_args') in request.GET:
            profiler = Profiler(use_signal=use_signal, collect_args=True)
        elif getattr(settings, 'PYINSTRUMENT_URL_ARGUMENT', 'profile') in request.GET or profile_dir:
            profiler = Profiler(use_signal=use_signal, collect_args=collect_args)

        if profiler:
            try:
                profiler.start()
                request.profiler = profiler
            except NotMainThreadError:
                raise NotMainThreadError(not_main_thread_message)
Example 41
def run_profiling(args):
    lprofiler = LineProfiler()

    monitor_functions = [api.problem.submit_key, api.problem.get_unlocked_pids, api.problem.get_solved_pids,
                         api.problem.get_all_problems, api.problem.get_solved_problems, api.stats.get_score,
                         api.cache.memoize, api.autogen.grade_problem_instance, api.autogen.get_problem_instance,
                         api.autogen.get_number_of_instances]

    for func in monitor_functions:
        lprofiler.add_function(func)

    lprofiler.enable()

    if args.stack:
        profiler = Profiler(use_signal=False)
        profiler.start()

    for func, a, kw in operations:
        func(*a, **kw)

    if args.stack:
        profiler.stop()

    lprofiler.disable()

    if args.print:
        print(profiler.output_text(unicode=True, color=True))
        lprofiler.print_stats()

    output = open(args.output, "w")

    if args.stack:
        output.write(profiler.output_text(unicode=True))

        if args.output_html is not None:
            output_html = open(args.output_html, "w")
            output_html.write(profiler.output_html())
            output_html.close()
            print("Wrote test info to " + args.output_html)

    lprofiler.print_stats(output)
    output.close()
    print("Wrote test info to " + args.output)
Example 42
def test_collapses_multiple_calls_by_default():
    profiler = Profiler()
    profiler.start()

    long_function_a()
    long_function_b()
    long_function_a()
    long_function_b()

    profiler.stop()

    text_output = profiler.output_text()

    # output should be something like:
    # 1.513 test_collapses_multiple_calls_by_default  test/test_profiler.py:25
    # |- 0.507 long_function_a  test/test_profiler.py:17
    # |- 0.503 long_function_b  test/test_profiler.py:20

    assert text_output.count('test_collapses_multiple_calls_by_default') == 1
    assert text_output.count('long_function_a') == 1
    assert text_output.count('long_function_b') == 1
Example 43
def test_two_functions():
    profiler = Profiler()
    profiler.start()

    long_function_a()
    long_function_b()

    profiler.stop()

    print(profiler.output_text())

    frame = profiler.last_session.root_frame()

    assert frame.function == 'test_two_functions'
    assert len(frame.children) == 2

    frame_b, frame_a = sorted(frame.children, key=lambda f: f.time(), reverse=True)

    assert frame_a.function == 'long_function_a'
    assert frame_b.function == 'long_function_b'
    assert 0.2 < frame_a.time() < 0.3
    assert 0.45 < frame_b.time() < 0.55
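long_function_a and long_function_b are not shown in these test snippets; judging by the asserted durations, they are probably simple sleep-based helpers along these lines:

import time

def long_function_a():
    time.sleep(0.25)

def long_function_b():
    time.sleep(0.5)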
Example 44
class OpenAssessmentTest(WebAppTest):
    """
    UI-level acceptance tests for Open Assessment.
    """
    TEST_COURSE_ID = "course-v1:edx+ORA203+course"

    PROBLEM_LOCATIONS = {
        'staff_only':
            u'courses/{test_course_id}/courseware/'
            u'61944efb38a349edb140c762c7419b50/415c3ee1b7d04b58a1887a6fe82b31d6/'.format(test_course_id=TEST_COURSE_ID),
        'self_only':
            u'courses/{test_course_id}/courseware/'
            u'a4dfec19cf9b4a6fb5b18be6ccd9cecc/338a4affb58a45459629e0566291381e/'.format(test_course_id=TEST_COURSE_ID),
        'peer_only':
            u'courses/{test_course_id}/courseware/'
            u'a4dfec19cf9b4a6fb5b18be6ccd9cecc/417e47b2663a4f79b62dba20b21628c8/'.format(test_course_id=TEST_COURSE_ID),
        'student_training':
            u'courses/{test_course_id}/courseware/'
            u'676026889c884ac1827688750871c825/5663e9b038434636977a4226d668fe02/'.format(test_course_id=TEST_COURSE_ID),
        'file_upload':
            u'courses/{test_course_id}/courseware/'
            u'57a3f9d51d424f6cb922f0d69cba868d/bb563abc989340d8806920902f267ca3/'.format(test_course_id=TEST_COURSE_ID),
        'full_workflow_staff_override':
            u'courses/{test_course_id}/courseware/'
            u'676026889c884ac1827688750871c825/181ea9ff144c4766be44eb8cb360e34f/'.format(test_course_id=TEST_COURSE_ID),
        'full_workflow_staff_required':
            u'courses/{test_course_id}/courseware/'
            u'8d9584d242b44343bc270ea5ef04ab03/0b0dcc728abe45138c650732af178afb/'.format(test_course_id=TEST_COURSE_ID),
    }

    SUBMISSION = u"This is a test submission."
    LATEX_SUBMISSION = u"[mathjaxinline]( \int_{0}^{1}xdx )[/mathjaxinline]"
    OPTIONS_SELECTED = [1, 2]
    STAFF_OVERRIDE_OPTIONS_SELECTED = [0, 1]
    STAFF_OVERRIDE_SCORE = 1
    STAFF_GRADE_EXISTS = "COMPLETE"
    STAFF_OVERRIDE_LEARNER_STEPS_NOT_COMPLETE = "YOU MUST COMPLETE THE STEPS ABOVE TO VIEW YOUR GRADE"
    STAFF_AREA_SCORE = "Final grade: {} out of 8"
    STAFF_OVERRIDE_STAFF_AREA_NOT_COMPLETE = "The problem has not been completed."
    EXPECTED_SCORE = 6
    STUDENT_TRAINING_OPTIONS = [
        [1, 2],
        [0, 2]
    ]

    TEST_PASSWORD = "******"

    def setUp(self, problem_type, staff=False):
        """
        Configure page objects to test Open Assessment.

        Args:
            problem_type (str): The type of problem being tested,
              used to choose which part of the course to load.
            staff (bool): If True, runs the test with a staff user (defaults to False).

        """
        super(OpenAssessmentTest, self).setUp()

        if PROFILING_ENABLED:
            self.profiler = Profiler(use_signal=False)
            self.profiler.start()

        self.problem_loc = self.PROBLEM_LOCATIONS[problem_type]
        self.auto_auth_page = AutoAuthPage(self.browser, course_id=self.TEST_COURSE_ID, staff=staff)
        self.submission_page = SubmissionPage(self.browser, self.problem_loc)
        self.self_asmnt_page = AssessmentPage('self-assessment', self.browser, self.problem_loc)
        self.peer_asmnt_page = AssessmentPage('peer-assessment', self.browser, self.problem_loc)
        self.student_training_page = AssessmentPage('student-training', self.browser, self.problem_loc)
        self.staff_asmnt_page = AssessmentPage('staff-assessment', self.browser, self.problem_loc)
        self.grade_page = GradePage(self.browser, self.problem_loc)

    def log_to_file(self):
        with open('{}-profile.log'.format(self.id()), 'w') as f:
            f.write(self.profiler.output_text())

    def tearDown(self):
        if PROFILING_ENABLED:
            self.profiler.stop()
            self.log_to_file()

    def login_user(self, learner, email):
        """
        Logs in an already existing user.

        Args:
            learner (str): the username of the user.
            email (str): email address of the user.
        """
        auto_auth_page = AutoAuthPage(
            self.browser, email=email, password=self.TEST_PASSWORD, username=learner,
            course_id=self.TEST_COURSE_ID, staff=True
        )
        auto_auth_page.visit()

    def do_self_assessment(self):
        """
        Creates a user, submits a self assessment, verifies the grade, and returns the username of the
        learner for which the self assessment was submitted.
        """
        self.auto_auth_page.visit()
        username, _ = self.auto_auth_page.get_username_and_email()
        self.submission_page.visit().submit_response(self.SUBMISSION)
        self.assertTrue(self.submission_page.has_submitted)

        # Submit a self-assessment
        self.submit_self_assessment(self.OPTIONS_SELECTED)

        # Verify the grade
        self.assertEqual(self.EXPECTED_SCORE, self.grade_page.wait_for_page().score)

        return username

    def submit_self_assessment(self, options=OPTIONS_SELECTED):
        """
        Submit a self assessment for the currently logged in student. Do not verify grade.

        Args:
            options: the options to select for the self assessment
                (will use OPTIONS_SELECTED if not specified)
        """
        self.self_asmnt_page.wait_for_page().wait_for_response()
        self.assertIn(self.SUBMISSION, self.self_asmnt_page.response_text)
        self.self_asmnt_page.assess("self", options).wait_for_complete()
        self.assertTrue(self.self_asmnt_page.is_complete)

    def _verify_staff_grade_section(self, expected_status, expected_message_title):
        """
        Verifies the expected status and message text in the Staff Grade section
        (as shown to the learner).
        """
        self.staff_asmnt_page.wait_for_page()
        self.assertEqual("Staff Grade", self.staff_asmnt_page.label)
        self.staff_asmnt_page.verify_status_value(expected_status)
        self.assertEqual(expected_message_title, self.staff_asmnt_page.message_title)

    def do_training(self):
        """
        Complete two training examples, satisfying the requirements.
        """
        for example_num, options_selected in enumerate(self.STUDENT_TRAINING_OPTIONS):
            if example_num > 0:
                try:
                    self.student_training_page.wait_for_num_completed(example_num)
                except BrokenPromise:
                    msg = "Did not complete at least {num} student training example(s).".format(num=example_num)
                    self.fail(msg)

            self.student_training_page.wait_for_page().wait_for_response().assess("training", options_selected)

        # Check that we've completed student training
        try:
            self.student_training_page.wait_for_complete()
        except BrokenPromise:
            self.fail("Student training was not marked complete.")

    def do_peer_assessment(self, count=1, options=OPTIONS_SELECTED):
        """
        Does the specified number of peer assessments.

        Args:
            count: the number of assessments that must be completed (defaults to 1)
            options: the options to use (defaults to OPTIONS_SELECTED)
        """
        self.peer_asmnt_page.visit()

        for count_assessed in range(1, count + 1):
            self.peer_asmnt_page.wait_for_page().wait_for_response().assess("peer", options)
            self.peer_asmnt_page.wait_for_num_completed(count_assessed)

    def do_staff_override(self, username, final_score=STAFF_AREA_SCORE.format(STAFF_OVERRIDE_SCORE)):
        """
        Complete a staff assessment (grade override).

        Args:
            username: the learner to grade
            final_score: the expected final score as shown in the staff area
                (defaults to the staff override score value)
        """
        self.staff_area_page.visit()
        self.staff_area_page.show_learner(username)
        self.staff_area_page.expand_learner_report_sections()
        self.staff_area_page.staff_assess(self.STAFF_OVERRIDE_OPTIONS_SELECTED, "override")
        self.staff_area_page.verify_learner_final_score(final_score)

    def do_staff_assessment(self, number_to_assess=0, options_selected=OPTIONS_SELECTED):
        """
        Use staff tools to assess available responses.

        Args:
            number_to_assess: the number of submissions to assess. If not provided (or 0),
                will grade all available submissions.
        """
        self.staff_area_page.visit()
        self.staff_area_page.click_staff_toolbar_button("staff-grading")
        # Get the counts before checking out a submission for assessment.
        start_numbers = self.staff_area_page.available_checked_out_numbers
        # Check out a submission.
        self.staff_area_page.expand_staff_grading_section()
        # Checked out number should increase, ungraded decrease.
        ungraded = start_numbers[0] - 1
        checked_out = start_numbers[1] + 1
        self.staff_area_page.verify_available_checked_out_numbers((ungraded, checked_out))
        assessed = 0
        while number_to_assess == 0 or assessed < number_to_assess:
            continue_after = False if number_to_assess-1 == assessed else ungraded > 0
            self.staff_area_page.staff_assess(options_selected, "full-grade", continue_after)
            assessed += 1
            if not continue_after:
                self.staff_area_page.verify_available_checked_out_numbers((ungraded, checked_out-1))
                break
            else:
                ungraded -= 1
                self.staff_area_page.verify_available_checked_out_numbers((ungraded, checked_out))

    def refresh_page(self):
        """
        Helper method that waits for "unsaved changes" warnings to clear before refreshing the page.
        """
        EmptyPromise(
            lambda: self.browser.execute_script("return window.onbeforeunload === null"),
            "Unsubmitted changes exist on page."
        ).fulfill()
        self.browser.refresh()
Example 45
def main():
    usage = ("usage: python -m pyinstrument [options] scriptfile [arg] ...")
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False
    parser.add_option('', '--html',
        dest="output_html", action='store_true',
        help="output HTML instead of text", default=False)
    parser.add_option('-o', '--outfile',
        dest="outfile", action='store',
        help="save report to <outfile>", default=None)

    parser.add_option('', '--unicode',
        dest='unicode', action='store_true',
        help='force unicode text output')
    parser.add_option('', '--no-unicode',
        dest='unicode', action='store_false',
        help='force ascii text output')

    parser.add_option('', '--color',
        dest='color', action='store_true',
        help='force ansi color text output')
    parser.add_option('', '--no-color',
        dest='color', action='store_false',
        help='force no color text output')

    if not sys.argv[1:]:
        parser.print_help()
        sys.exit(2)

    (options, args) = parser.parse_args()
    sys.argv[:] = args

    if len(args) > 0:
        progname = args[0]
        sys.path.insert(0, os.path.dirname(progname))

        with open(progname, 'rb') as fp:
            code = compile(fp.read(), progname, 'exec')
        globs = {
            '__file__': progname,
            '__name__': '__main__',
            '__package__': None,
        }

        try:
            profiler = Profiler()
        except SignalUnavailableError:
            profiler = Profiler(use_signal=False)

        profiler.start()

        try:
            exec_(code, globs, None)
        except (SystemExit, KeyboardInterrupt):
            pass

        profiler.stop()

        if options.outfile:
            f = codecs.open(options.outfile, 'w', 'utf-8')
        else:
            f = sys.stdout

        unicode_override = options.unicode != None
        color_override = options.color != None

        unicode = options.unicode if unicode_override else file_supports_unicode(f)
        color = options.color if color_override else file_supports_color(f)

        if options.output_html:
            f.write(profiler.output_html())
        else:
            f.write(profiler.output_text(unicode=unicode, color=color))

        f.close()
    else:
        parser.print_usage()
    return parser
Example 46
def observe(config_file, profile=False, raise_queue_empty=True):

    if profile:
        try:
            from pyinstrument import Profiler
        except ImportError:
            print "Error importing pyinstrument"
            profile = False

    ztf_config = ZTFConfiguration("../sims/{}".format(config_file))

    # load config parameters into local variables
    run_name = ztf_config.config["run_name"]
    start_time = ztf_config.config["start_time"]
    weather_year = ztf_config.config["weather_year"]
    if weather_year == "None":
        weather_year = None
    survey_duration = ztf_config.config["survey_duration_days"] * u.day
    block_programs = ztf_config.config["block_programs"]
    observing_programs = ztf_config.build_observing_programs()

    if profile:
        if survey_duration > 1.0 * u.day:
            print ("Don't profile long runs: 25% overhead")
            profile = False
        else:
            profiler = Profiler()
            profiler.start()

    survey_start_time = Time(start_time, scale="utc", location=P48_loc)

    tel = ZTFStateMachine(
        current_time=survey_start_time,
        historical_observability_year=weather_year,
        logfile="../sims/{}_log.txt".format(run_name),
    )

    # set up QueueManager
    Q = GreedyQueueManager(block_programs=block_programs)

    for op in observing_programs:
        Q.add_observing_program(op)

    # initialize nightly field requests (Tom Barlow function)
    Q.assign_nightly_requests(tel.current_state_dict())

    # initialize sqlite history
    log = ObsLogger(run_name, tel.current_time)

    current_night_mjd = np.floor(tel.current_time.mjd)

    while tel.current_time < (survey_start_time + survey_duration):

        # check if it is a new night and reload queue with new requests
        if np.floor(tel.current_time.mjd) > current_night_mjd:
            log.prev_obs = None
            Q.assign_nightly_requests(tel.current_state_dict())
            current_night_mjd = np.floor(tel.current_time.mjd)

        if tel.check_if_ready():
            current_state = tel.current_state_dict()
            # get coords
            try:
                next_obs = Q.next_obs(current_state)
                # TODO: debugging check...
                assert next_obs["request_id"] in Q.queue.index
            except QueueEmptyError:
                if not raise_queue_empty:
                    tel.logger.info("Queue empty!  Waiting...")
                    log.prev_obs = None
                    tel.wait()
                    continue
                else:
                    raise QueueEmptyError("Queue is empty")

            # try to change filters, if needed
            if next_obs["target_filter_id"] != current_state["current_filter_id"]:
                if not tel.start_filter_change(next_obs["target_filter_id"]):
                    tel.logger.info("Filter change failure!  Waiting...")
                    log.prev_obs = None
                    tel.wait()
                    continue

            # try to slew to the next target
            if not tel.start_slew(coord.SkyCoord(next_obs["target_ra"] * u.deg, next_obs["target_dec"] * u.deg)):
                tel.set_cant_observe()
                # TODO: log the failure
                # "missed history": http://ops2.lsst.org/docs/current/architecture.html#output-tables
                tel.logger.info(
                    "Failure slewing to {}, {}!  Waiting...".format(
                        next_obs["target_ra"] * u.deg, next_obs["target_dec"] * u.deg
                    )
                )
                log.prev_obs = None
                tel.wait()
                continue

            # try to expose
            if not tel.start_exposing():
                tel.set_cant_observe()
                tel.logger.info("Exposure failure!  Waiting...")
                log.prev_obs = None
                tel.wait()
                continue
            else:
                # exposure completed successfully.  now
                # a) store exposure information in pointing history sqlite db
                current_state = tel.current_state_dict()
                log.log_pointing(current_state, next_obs)
                # b) update Fields
                Q.fields.mark_field_observed(next_obs, current_state)
                # c) remove completed request_id from the pool and the queue
                # TODO: debugging check
                assert next_obs["request_id"] in Q.queue.index
                Q.remove_requests(next_obs["request_id"])
        else:
            log.prev_obs = None
            tel.set_cant_observe()
            tel.wait()

    if profile:
        profiler.stop()
        print(profiler.output_text(unicode=True, color=True))
        with open("../sims/{}_profile.txt".format(run_name), "w") as f:
            f.write(profiler.output_text())
Example 47
from pyinstrument import Profiler
from platform import platform

p = Profiler()

p.start()

def func():
    fd = open('/dev/urandom', 'rb')
    data = fd.read(1024*1024)

func()

# this failed on ubuntu 12.04 
platform()

p.stop()

print(p.output_text())

with open('ioerror_out.html', 'w') as f:
    f.write(p.output_html())
Example 48
from pyinstrument import Profiler

import numpy as np
import pyamg

profiler = Profiler()
profiler.start()

n = int(1e3)
A = pyamg.gallery.poisson((n, n), format='csr')
b = np.random.rand(A.shape[0])

ml = pyamg.smoothed_aggregation_solver(A, max_coarse=10)
res = []
x = ml.solve(b, accel='cg', residuals=res)
print(len(res))

profiler.stop()

print(profiler.output_text(unicode=True, color=True))
Example 49
def main():
    usage = ("usage: pyinstrument [options] scriptfile [arg] ...")
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False

    parser.add_option('', '--setprofile',
        dest='setprofile', action='store_true',
        help='run in setprofile mode, instead of signal mode', default=False)

    parser.add_option('', '--html',
        dest="output_html", action='store_true',
        help="output HTML instead of text", default=False)
    parser.add_option('-o', '--outfile',
        dest="outfile", action='store',
        help="save report to <outfile>", default=None)

    parser.add_option('', '--unicode',
        dest='unicode', action='store_true',
        help='force unicode text output')
    parser.add_option('', '--no-unicode',
        dest='unicode', action='store_false',
        help='force ascii text output')

    parser.add_option('', '--color',
        dest='color', action='store_true',
        help='force ansi color text output')
    parser.add_option('', '--no-color',
        dest='color', action='store_false',
        help='force no color text output')

    if not sys.argv[1:]:
        parser.print_help()
        sys.exit(2)

    (options, args) = parser.parse_args()
    sys.argv[:] = args

    if len(args) > 0:
        progname = args[0]
        sys.path.insert(0, os.path.dirname(progname))

        with open(progname, 'rb') as fp:
            code = compile(fp.read(), progname, 'exec')
        globs = {
            '__file__': progname,
            '__name__': '__main__',
            '__package__': None,
        }

        try:
            profiler = Profiler(use_signal=not options.setprofile)
        except SignalUnavailableError:
            profiler = Profiler(use_signal=False)

        profiler.start()

        try:
            exec_(code, globs, None)
        except IOError as e:
            import errno

            if e.errno == errno.EINTR:
                print(
                    'Failed to run program due to interrupted system system call.\n'
                    'This happens because pyinstrument is sending OS signals to the running\n'
                    'process to interrupt it. If your program has long-running syscalls this\n'
                    'can cause a problem.\n'
                    '\n'
                    'You can avoid this error by running in \'setprofile\' mode. Do this by\n'
                    'passing \'--setprofile\' when calling pyinstrument at the command-line.\n'
                    '\n'
                    'For more information, see\n'
                    'https://github.com/joerick/pyinstrument/issues/16\n'
                )

            raise
        except (SystemExit, KeyboardInterrupt):
            pass

        profiler.stop()

        if options.outfile:
            f = codecs.open(options.outfile, 'w', 'utf-8')
        else:
            f = sys.stdout

        unicode_override = options.unicode != None
        color_override = options.color != None

        unicode = options.unicode if unicode_override else file_supports_unicode(f)
        color = options.color if color_override else file_supports_color(f)

        if options.output_html:
            f.write(profiler.output_html())
        else:
            f.write(profiler.output_text(unicode=unicode, color=color))

        f.close()
    else:
        parser.print_usage()
    return parser
Example 50
def main():
    """Command-line utility for using (and testing) s3 utility methods."""
    logging.basicConfig(level=logging.DEBUG)
    arg_parser = argparse.ArgumentParser(description='Perform obfuscation of forum .mongo dump files.')

    arg_parser.add_argument(
        'input',
        help='Read mongo files from this location.',
    )
    arg_parser.add_argument(
        '-o', '--output',
        help='Write obfuscated mongo files to this location in the local file system.',
        default=None
    )
    arg_parser.add_argument(
        '-u', '--userinfo',
        help='For events, read a custom user-info file from the local fs that contains username, email, user-id, fullname.',
        default=None
    )
    arg_parser.add_argument(
        '--log-context',
        help='Number of characters of context to keep on each side of a match.',
        type=int,
        default=50,
    )
    #####################
    # Flags to indicate what to obfuscate.
    #####################
    arg_parser.add_argument(
        '--forum',
        help='Read in and obfuscate forum posts.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--wiki',
        help='Read in and obfuscate wiki documents.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--courseware',
        help='Read in and obfuscate courseware_studentmodule records.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--event',
        help='Read in and obfuscate events.',
        action='store_true',
    )

    #####################
    # Various flags to indicate what to look for.
    #####################
    arg_parser.add_argument(
        '--phone',
        help='Extract phone numbers',
        action='store_true',
    )
    arg_parser.add_argument(
        '--possible-phone',
        help='Extract possible phone numbers',
        action='store_true',
    )
    arg_parser.add_argument(
        '--email',
        help='Extract email addresses',
        action='store_true',
    )
    arg_parser.add_argument(
        '--phone-context',
        help='Extract phone number context',
        action='store_true',
    )
    arg_parser.add_argument(
        '--email-context',
        help='Extract email address context',
        action='store_true',
    )
    arg_parser.add_argument(
        '--name-context',
        help='Extract name context',
        action='store_true',
    )
    arg_parser.add_argument(
        '--facebook',
        help='Extract facebook urls',
        action='store_true',
    )
    arg_parser.add_argument(
        '--username',
        help='Extract username',
        action='store_true',
    )
    arg_parser.add_argument(
        '--fullname',
        help='Extract fullname.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--userid',
        help='Extract user-id.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--skip-post',
        help='Skip performing filtering on event.POST entries.',
        action='store_true',
    )
    arg_parser.add_argument(
        '--pyinstrument',
        help='Profile the run and write the output to stderr',
        action='store_true'
    )
    args = arg_parser.parse_args()
    kwargs = vars(args)

    profiler = None
    if args.pyinstrument:
        profiler = Profiler()  # or Profiler(use_signal=False), see below
        profiler.start()

    try:
        obfuscator = BulkObfuscator(**kwargs)
        obfuscator.obfuscate_directory(args.input, args.output)
    finally:
        if profiler:
            profiler.stop()
            print(profiler.output_text(unicode=True, color=True), file=sys.stderr)
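
The example above only profiles when the --pyinstrument flag is passed, and uses try/finally so the report is printed even if the obfuscation run fails. A minimal sketch of that opt-in pattern, with a hypothetical process_files workload standing in for BulkObfuscator:

import argparse
import sys

from pyinstrument import Profiler


def process_files(path):
    # Hypothetical stand-in for the real workload.
    with open(path) as handle:
        return sum(len(line) for line in handle)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('--pyinstrument', action='store_true',
                        help='Profile the run and write the output to stderr')
    args = parser.parse_args()

    # Only pay the profiling overhead when explicitly requested.
    profiler = Profiler() if args.pyinstrument else None
    if profiler:
        profiler.start()
    try:
        process_files(args.input)
    finally:
        if profiler:
            profiler.stop()
            print(profiler.output_text(unicode=True, color=True), file=sys.stderr)
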
Example n. 51
0
def observe(run_name=run_name, start_time='2018-01-01 04:00:00',
            weather_year=None, survey_duration=1 * u.hour):

    if profile:
        profiler = Profiler()
        profiler.start()

    survey_start_time = Time(start_time, scale='utc', location=P48_loc)

    tel = ZTFStateMachine(
        current_time=survey_start_time,
        historical_observability_year=weather_year)

    # set up QueueManager
    Q = GreedyQueueManager()

    # set up Observing Programs
    #CollabOP = CollaborationObservingProgram()
    # Q.add_observing_program(CollabOP)
    MSIPOP = MSIPObservingProgram(
        Q.fields.select_field_ids(dec_range=[-30, 90], grid_id=0))
    MSIPOP.observing_time_fraction = 1.0
    Q.add_observing_program(MSIPOP)
    #CaltechOP = CaltechObservingProgram()
    # Q.add_observing_program(CaltechOP)

    # initialize nightly field requests (Tom Barlow function)
    Q.assign_nightly_requests(tel.current_state_dict())

    # temporary loading to test things
    # Q.rp.add_requests(1,
    #        Q.fields.fields[
    #            Q.fields.select_fields(dec_range=[-30,90])].index, 2,
    #        'no_cadence',{})

    # initialize sqlite history
    log = ObsLogger(run_name, tel.current_time)
    log.create_pointing_log(clobber=True)

    while tel.current_time < (survey_start_time + survey_duration):

        # TODO: reload queue with new requests on update interval (nightly)

        if tel.check_if_ready():
            current_state = tel.current_state_dict()
            # get coords
            next_obs = Q.next_obs(current_state)

            # TODO: filter change, if needed

            if not tel.start_slew(coord.SkyCoord(next_obs['target_ra'] * u.deg,
                                                 next_obs['target_dec'] * u.deg)):
                tel.set_cant_observe()
                # TODO: log the failure
                # "missed history": http://ops2.lsst.org/docs/current/architecture.html#output-tables
                log.prev_obs = None
                tel.wait()
                continue
            if not tel.start_exposing():
                tel.set_cant_observe()
                # TODO: log the failure
                log.prev_obs = None
                tel.wait()
                continue
            else:
                # exposure completed successfully.  now
                # a) store exposure information in pointing history sqlite db
                current_state = tel.current_state_dict()
                log.log_pointing(current_state, next_obs)
                # b) update Fields
                Q.fields.mark_field_observed(next_obs, current_state)
                # c) remove completed request_id
                Q.rp.remove_requests(next_obs['request_id'])
        else:
            tel.set_cant_observe()
            tel.wait()

    if profile:
        profiler.stop()
        print(profiler.output_text(unicode=True, color=True))
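
In recent pyinstrument releases the same start/stop bookkeeping can be written with a context manager, which removes the need to guard every stop() call behind a flag check. A minimal sketch, assuming a pyinstrument version where Profiler supports the with statement (the simulate_survey function is hypothetical):

from pyinstrument import Profiler


def simulate_survey(n_steps=1000):
    # Hypothetical stand-in for the observing loop above.
    total = 0.0
    for step in range(n_steps):
        total += step ** 0.5
    return total


profiler = Profiler()
with profiler:
    simulate_survey()
print(profiler.output_text(unicode=True, color=True))
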
Example n. 52
0
def main():
    usage = ("usage: pyinstrument [options] scriptfile [arg] ...")
    version_string = 'pyinstrument {v}, on Python {pyv[0]}.{pyv[1]}.{pyv[2]}'.format(
        v=pyinstrument.__version__,
        pyv=sys.version_info,
    )
    parser = optparse.OptionParser(usage=usage, version=version_string)
    parser.allow_interspersed_args = False

    def dash_m_callback(option, opt, value, parser):
        parser.values.module_name = value
        # everything after the -m argument should be passed to that module
        parser.values.module_args = parser.rargs + parser.largs
        parser.rargs[:] = []
        parser.largs[:] = []

    parser.add_option('', '--load-prev',
        dest='load_prev', action='store', metavar='ID',
        help="Instead of running a script, load a previous report")

    parser.add_option('-m', '',
        dest='module_name', action='callback', callback=dash_m_callback,
        type="str",
        help="run library module as a script, like 'python -m module'")

    parser.add_option('-o', '--outfile',
        dest="outfile", action='store',
        help="save to <outfile>", default=None)

    parser.add_option('-r', '--renderer',
        dest='renderer', action='store', type='string',
        help=("how the report should be rendered. One of: 'text', 'html', 'json', or python "
              "import path to a renderer class"),
        default='text')

    parser.add_option('', '--html',
        dest="output_html", action='store_true',
        help=optparse.SUPPRESS_HELP, default=False)  # deprecated shortcut for --renderer=html

    parser.add_option('-t', '--timeline',
        dest='timeline', action='store_true',
        help="render as a timeline - preserve ordering and don't condense repeated calls")

    parser.add_option('', '--hide',
        dest='hide_fnmatch', action='store', metavar='EXPR',
        help=("glob-style pattern matching the file paths whose frames to hide. Defaults to "
              "'*{sep}lib{sep}*'.").format(sep=os.sep),
        default='*{sep}lib{sep}*'.format(sep=os.sep))
    parser.add_option('', '--hide-regex',
        dest='hide_regex', action='store', metavar='REGEX',
        help=("regex matching the file paths whose frames to hide. Useful if --hide doesn't give "
              "enough control."))

    parser.add_option('', '--show',
        dest='show_fnmatch', action='store', metavar='EXPR',
        help=("glob-style pattern matching the file paths whose frames to "
              "show, regardless of --hide or --hide-regex. For example, use "
              "--show '*/<library>/*' to show frames within a library that "
              "would otherwise be hidden."))
    parser.add_option('', '--show-regex',
        dest='show_regex', action='store', metavar='REGEX',
        help=("regex matching the file paths whose frames to always show. "
              "Useful if --show doesn't give enough control."))
    parser.add_option('', '--show-all',
        dest='show_all', action='store_true',
        help="show everything", default=False)

    parser.add_option('', '--unicode',
        dest='unicode', action='store_true',
        help='(text renderer only) force unicode text output')
    parser.add_option('', '--no-unicode',
        dest='unicode', action='store_false',
        help='(text renderer only) force ascii text output')

    parser.add_option('', '--color',
        dest='color', action='store_true',
        help='(text renderer only) force ansi color text output')
    parser.add_option('', '--no-color',
        dest='color', action='store_false',
        help='(text renderer only) force no color text output')

    if not sys.argv[1:]:
        parser.print_help()
        sys.exit(2)

    options, args = parser.parse_args()

    if args == [] and options.module_name is None and options.load_prev is None:
        parser.print_help()
        sys.exit(2)

    if not options.hide_regex:
        options.hide_regex = fnmatch.translate(options.hide_fnmatch)
    
    if not options.show_regex and options.show_fnmatch:
        options.show_regex = fnmatch.translate(options.show_fnmatch)
     
    if options.show_all:
        options.show_regex = r'.*'

    if options.load_prev:
        session = load_report(options.load_prev)
    else:
        if options.module_name is not None:
            sys.argv[:] = [options.module_name] + options.module_args
            code = "run_module(modname, run_name='__main__')"
            globs = {
                'run_module': runpy.run_module,
                'modname': options.module_name
            }
        else:
            sys.argv[:] = args
            progname = args[0]
            sys.path.insert(0, os.path.dirname(progname))
            with open(progname, 'rb') as fp:
                code = compile(fp.read(), progname, 'exec')
            globs = {
                '__file__': progname,
                '__name__': '__main__',
                '__package__': None,
            }

        profiler = Profiler()

        profiler.start()

        try:
            exec_(code, globs, None)
        except (SystemExit, KeyboardInterrupt):
            pass

        profiler.stop()
        session = profiler.last_session

    if options.output_html:
        options.renderer = 'html'

    output_to_temp_file = (options.renderer == 'html'
                           and not options.outfile
                           and file_is_a_tty(sys.stdout))

    if options.outfile:
        f = codecs.open(options.outfile, 'w', 'utf-8')
        should_close_f_after_writing = True
    elif not output_to_temp_file:
        if PY2:
            f = codecs.getwriter('utf-8')(sys.stdout)
        else:
            f = sys.stdout
        should_close_f_after_writing = False

    renderer_kwargs = {'processor_options': {
        'hide_regex': options.hide_regex,
        'show_regex': options.show_regex,
    }}

    if options.timeline is not None:
        renderer_kwargs['timeline'] = options.timeline

    if options.renderer == 'text':
        unicode_override = options.unicode is not None
        color_override = options.color is not None
        unicode = options.unicode if unicode_override else file_supports_unicode(f)
        color = options.color if color_override else file_supports_color(f)
        
        renderer_kwargs.update({'unicode': unicode, 'color': color})

    renderer_class = get_renderer_class(options.renderer)
    renderer = renderer_class(**renderer_kwargs)

    # remove this frame from the trace
    renderer.processors.append(remove_first_pyinstrument_frame_processor)


    if output_to_temp_file:
        output_filename = renderer.open_in_browser(session)
        print('stdout is a terminal, so saved profile output to %s' % output_filename)
    else:
        f.write(renderer.render(session))
        if should_close_f_after_writing:
            f.close()

    if options.renderer == 'text':
        _, report_identifier = save_report(session)
        print('To view this report with different options, run:')
        print('    pyinstrument --load-prev %s [options]' % report_identifier)
        print('')
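
The newer CLI above builds a renderer object and calls renderer.render(session) on the profiler's last_session rather than using the output_text/output_html shortcuts. A minimal sketch of that session/renderer flow, assuming the pyinstrument.renderers module exposes ConsoleRenderer and HTMLRenderer as it does in pyinstrument 3.x:

from pyinstrument import Profiler
from pyinstrument.renderers import ConsoleRenderer, HTMLRenderer

profiler = Profiler()
profiler.start()
sum(i * i for i in range(200000))  # stand-in workload
profiler.stop()

session = profiler.last_session

# Render the same session twice: once for the terminal, once as HTML.
print(ConsoleRenderer(unicode=True, color=True).render(session))
with open('profile.html', 'w', encoding='utf-8') as f:
    f.write(HTMLRenderer().render(session))
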