Example #1
    def ekklesia_log_tween(request):
        request_data = {'url': request.url, 'headers': dict(request.headers)}

        user = request.current_user

        if user is not None:
            request_data['user'] = user.id

        with start_task(action_type='request', request=request_data) as task:
            try:
                if print_sql_statements:
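                    # The underlying DB connection keeps a `history` of executed SQL statements; it is printed after the response below.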
                    history = request.db_session.connection(
                    ).connection.connection.history
                    history.clear()
                response = handler(request)

                if print_sql_statements:
                    print()
                    print(f"{SQL_PRINT_PREFIX}SQL statements for this request")
                    history.print_statements(prefix=SQL_PRINT_PREFIX)
                    print(
                        f"{SQL_PRINT_PREFIX}{len(history)} SQL statements, duration {history.overall_duration_ms():.2f}ms"
                    )
                    print()
                return response
            except HTTPError:
                # Let Morepath handle this (exception views).
                raise
            except Exception as e:
                # Something else failed, wrap the exception and add metadata for better error reporting.
                datetime_now = datetime.now()
                suffix = task.task_uuid[:7]
                xid = exception_uid(e, datetime_now, suffix)
                raise UnhandledRequestException(task.task_uuid, xid) from e
def main(csv_filepath: str):
    with start_task(action_type="update_users"):
        # Clear missing display names file
        open(settings.missing_display_name_file, 'w').close()

        user_updates = prepare_user_updates(csv_filepath)
        update_keycloak_users(user_updates)
Example #3
def get_content(ctx, key):
    with start_task(action_type="fetch pad"):
        with pad_api(ctx) as api:
            api.open_pad(key)
            content = api.get_pad_content()

    echo(content)
Example #4
def bg(wait, command, raweliot, targets):
    """Run the default oam operation on targets"""
    if raweliot:
        eliot.to_file(sys.stdout)
    else:
        # eliottree.render_tasks(sys.stdout.write, tasks, colorize=True) #py3
        eliot.add_destination(render_stdout)
    procs = []
    if not targets:
        targets = ['localhost']
    with eliot.start_task(action_type='run_ops', targets=targets):
        with eliot.start_action(action_type='start_ops', targets=targets):
            for server in targets:
                if wait:
                    cmd = FG_CMD.format(server, command)
                else:
                    cmd = BG_CMD.format(server, SESSION_NAME, command)
                logging.debug('%s start, cmd: %s', server, cmd)
                with eliot.start_action(action_type='start_process', target=server, cmd=cmd):
                    procs.append(subprocess.Popen(cmd, shell=True))
        finished = 0
        with eliot.start_action(action_type='wait_finishes', targets=targets):
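            # Poll the spawned processes until all of them have exited, logging a 'finish' message for each.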
            while finished != len(procs):
                for index, server in enumerate(procs):
                    logging.debug('looping at %s %d', targets[index], finished)
                    if server.poll() is not None:
                        eliot.Message.log(message_type='finish', target=targets[index])
                        finished += 1
                time.sleep(1)
        with eliot.start_action(action_type='wait_terminations', targets=targets):
            for index, server in enumerate(procs):
                with eliot.start_action(action_type='wait_process', target=targets[index]):
                    server.wait()
                    logging.debug('%s finish, returncode=%d', targets[index], server.returncode)
Example #5
def set_content(ctx, key, infile):
    with start_task(action_type="set pad"):
        content = read_infile(infile)

        with pad_api(ctx) as api:
            api.open_pad(key)
            api.set_pad_content(content)
Example #6
def update(json_filepath: str):
    with start_task(action_type="update_departments"):
        departments = load_departments_json(json_filepath)

        keycloak_admin = create_keycloak_admin_client()

        create_department_group(keycloak_admin, departments)
def _run_task(rule_name, master_id):
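    # Replace any existing Eliot destinations with a log file dedicated to this master run.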
    del Logger._destinations._destinations[:]
    to_file(open(os.path.join(eliot_log_path, master_id), "ab"))

    with start_task(action_type="invenio_checker:supervisor:_run_task",
                    master_id=master_id) as eliot_task:
        from .models import CheckerRule
        # cleanup_failed_runs()

        redis_master = None

        def cleanup_session():
            print 'Cleaning up'
            if redis_master is not None:
                redis_master.zap()

        def sigint_hook(rcv_signal, frame):
            cleanup_session()

        def except_hook(type_, value, tback):
            cleanup_session()
            reraise(type_, value, tback)

        signal.signal(signal.SIGINT, sigint_hook)
        signal.signal(signal.SIGTERM, sigint_hook)
        sys.excepthook = except_hook

        with start_action(action_type='create master'):
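            # serialize_task_id() produces an identifier that lets other processes log within this Eliot task.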
            eliot_task_id = eliot_task.serialize_task_id()
            redis_master = RedisMaster(master_id, eliot_task_id, rule_name)

        with start_action(action_type='create subtasks'):
            rules = CheckerRule.from_ids((rule_name,))
            bundles = rules_to_bundles(rules, redis_master.all_recids)

            subtasks = []
            errback = handle_error.s()
            for rule, rule_chunks in bundles.iteritems():
                for chunk in rule_chunks:
                    task_id = uuid()
                    redis_master.workers_append(task_id)
                    eliot_task_id = eliot_task.serialize_task_id()
                    RedisWorker(task_id, eliot_task_id, chunk)
                    subtasks.append(run_test.subtask(args=(rule.filepath,
                                                           redis_master.master_id,
                                                           task_id),
                                                     task_id=task_id,
                                                     link_error=[errback]))

            Message.log(message_type='registered subtasks', value=str(redis_master.workers))

        with start_action(action_type='run chord'):
            redis_master.status = StatusMaster.running
            header = subtasks
            callback = handle_results.subtask(link_error=[handle_errors.s(redis_master.master_id)])
            my_chord = chord(header)
            result = my_chord(callback)
            redis_master.status = StatusMaster.running
def flog_gatherer(reactor, base_dir, tahoe_venv, flog_binary, request):
    if not request.config.getoption("gather_foolscap_logs"):
        return ""

    with start_task(action_type=u"integration:flog_gatherer"):
        out_protocol = _CollectOutputProtocol()
        gather_dir = join(base_dir, 'flog_gather')
        reactor.spawnProcess(
            out_protocol,
            flog_binary,
            (
                u'flogtool', u'create-gatherer',
                u'--location', u'tcp:localhost:3117',
                u'--port', u'3117',
                gather_dir,
            )
        )
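        # Block until the 'flogtool create-gatherer' process has completed.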
        pytest_twisted.blockon(out_protocol.done)

        magic_text = "Gatherer waiting at"
        executable = str(tahoe_venv.bin('twistd'))
        args = (
            'twistd', '--nodaemon', '--python',
            join(gather_dir, 'gatherer.tac'),
        )
        action_fields = {
            "action_type": u"integration:flog-gatherer:service",
        }
        pytest_twisted.blockon(
            run_service(reactor, request, action_fields, magic_text, executable, args, cwd=gather_dir)
        )

        def cleanup():
            flog_file = 'integration.flog_dump'
            flog_protocol = _DumpOutputProtocol(open(flog_file, 'w'))
            flogs = [x for x in listdir(gather_dir) if x.endswith('.flog')]

            print("Dumping {} flogtool logfiles to '{}'".format(len(flogs), flog_file))
            reactor.spawnProcess(
                flog_protocol,
                flog_binary,
                (
                    'flogtool', 'dump', join(gather_dir, flogs[0])
                ),
            )
            print("Waiting for flogtool to complete")
            try:
                pytest_twisted.blockon(flog_protocol.done)
            except ProcessTerminated as e:
                print("flogtool exited unexpectedly: {}".format(str(e)))
            print("Flogtool completed")

        request.addfinalizer(cleanup)

        with open(join(gather_dir, 'log_gatherer.furl'), 'r') as f:
            furl = f.read().strip()
        return furl
Example #9
    def ekklesia_log_tween(request):
        request_data = {'url': request.url, 'headers': dict(request.headers)}

        user = request.current_user

        if user is not None:
            request_data['user'] = user.id

        with start_task(action_type='request', request=request_data):
            return handler(request)
Example #10
def create(ctx, infile):
    with start_task(action_type="create pad"):
        args = []
        if infile:
            args.append(read_infile(infile))

        with pad_api(ctx) as api:
            pad_info = api.create_pad(*args)

    echo(pad_info["url"])
    def _script_io() -> None:
        from pathlib import Path

        _configure_logging(Path('log/eliot.log'))

        with el.start_task(action_type='luigi.build'):
            luigi.build([NAACCR_Load()], local_scheduler=True)

        if False:  # static check
            MigrateUpload()
    def startService(self):
        if self.task_fields:
            self.task = start_task(**self.task_fields)
            self.task.__enter__()
        if self.capture_logs:
            self.stdlib_cleanup = _stdlib_logging_to_eliot_configuration(
                getLogger())
            self.twisted_observer = _TwistedLoggerToEliotObserver()
            globalLogPublisher.addObserver(self.twisted_observer)
        add_destinations(*self.destinations)
        return Service.startService(self)
Example #13
    def __task_factory(_loop, coro):
        """

        Args:
            loop (Loop):
            coro ():

        Returns:
            asyncio.tasks.Task: Task
        """

        # TODO: Sanity check that _loop is the same as Taskmanager._loop
        with start_task(action_type="TaskFactory", loop=_loop):
            task = Task(coro, name=None).get_task()
            _update_source_traceback(task)

        return task
Example #14
async def send_templated_message(
    msg: TemplatedMessage,
    client_settings: ClientSettings = Depends(identify_client)):

    with start_task(task="send_templated_message") as task:
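        # Try to deliver the message over every transport the recipient can be reached on, recording which succeed and which fail.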

        recipient_info = decode_recipient_info(
            msg.recipient_info, msg.sender or client_settings.default_sender)

        failed_transports = []
        successful_transports = []

        for transport_id, recipient in recipient_info.transports.items():
            transport = TRANSPORTS[transport_id]
            await transport.connect()
            try:
                await transport.send_templated_message(msg, recipient,
                                                       client_settings)
            except SendFailed:
                failed_transports.append(transport)
            else:
                successful_transports.append(transport)

            await transport.disconnect()

        msg_id = str(ULID())

        task.add_success_fields(
            msg_id=msg_id,
            failed_transports=[t.transport_name for t in failed_transports],
            successful_transports=[
                t.transport_name for t in successful_transports
            ])

    if not successful_transports:
        transports_failed = TransportsFailed.ALL
    elif failed_transports:
        transports_failed = TransportsFailed.SOME
    else:
        transports_failed = TransportsFailed.NONE

    return MessageResponse(msg_id=msg_id, transports_failed=transports_failed)
Example #15
def introducer(reactor, tahoe_venv, base_dir, flog_gatherer, request):
    with start_task(action_type=u"integration:introducer").context():
        config = '''
[node]
nickname = introducer0
web.port = 4560
log_gatherer.furl = {log_furl}
tub.port = tcp:9321
tub.location = tcp:localhost:9321
'''.format(log_furl=flog_gatherer)

        intro_dir = join(base_dir, 'introducer')
        print("making introducer", intro_dir)

        if not exists(intro_dir):
            mkdir(intro_dir)
            done_proto = _ProcessExitedProtocol()
            _tahoe_runner(
                done_proto,
                reactor,
                tahoe_venv,
                request,
                (
                    'create-introducer',
                    '--listen=tcp',
                    '--hostname=localhost',
                    intro_dir,
                ),
            )
            pytest_twisted.blockon(done_proto.done)

        # over-write the config file with our stuff
        with open(join(intro_dir, 'tahoe.cfg'), 'w') as f:
            f.write(config)

        magic_text = 'introducer running'
        action_fields = {
            "action_type": u"integration:introducer:service",
        }
        return pytest_twisted.blockon(
            run_tahoe_service(reactor, request, action_fields, magic_text, tahoe_venv, intro_dir)
        )
Example #16
def honeymoon(family, destination):
    with start_task(action_type="honeymoon", people=family):
        destination.visited(family)
Example #17
    from ekklesia_portal.app import make_wsgi_app

    app = make_wsgi_app(args.config_file)

    from ekklesia_portal.datamodel import Ballot, Department, Proposition, PropositionType, \
        User, VotingPhase, Supporter, Tag
    from ekklesia_common.database import Session

    session = Session()

    sqlalchemy.orm.configure_mappers()

    failed_propositions = {}

    for fp in args.filenames:
        with start_task(log_level="INFO", action_type="import_proposition"):
            try:
                imported_data = load_proposition_json_file(fp)
                insert_proposition(args.department, args.voting_phase,
                                   **imported_data)
            except MissingFieldsException as e:
                failed_propositions[fp] = e.args[0]
            except:
                write_traceback()

    if set(args.filenames) - set(failed_propositions):
        transaction.commit()

    if failed_propositions:
        Message.log(log_level="ERROR",
                    message_type="failed_propositions",
Example #18
def honeymoon(family, destination):
    with start_task(action_type="honeymoon", people=family):
        destination.visited(family)
def task_action(task: luigi.Task, method: str) -> Iterator[el.Action]:
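    # Wraps a Luigi task method in an Eliot task, logging the task's significant public parameters as fields.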
    with el.start_task(action_type=f'{task.task_family}.{method}',
                       task_id=task.task_id,
                       **task.to_str_params(only_significant=True,
                                            only_public=True)) as ctx:
        yield ctx
Example #20
def _run_task(rule_name, master_id):

    # # If you find yourself debugging celery crashes:
    # redis_master = None
    # def cleanup_session():
    #     if redis_master is not None:
    #         redis_master.zap()
    # def sigint_hook(rcv_signal, frame):
    #     cleanup_session()
    # def except_hook(type_, value, tback):
    #     cleanup_session()
    #     reraise(type_, value, tback)
    # signal.signal(signal.SIGINT, sigint_hook)
    # signal.signal(signal.SIGTERM, sigint_hook)
    # sys.excepthook = except_hook

    clear_logger_destinations(Logger)
    to_file(get_eliot_log_file(master_id=master_id))
    with start_task(action_type="invenio_checker:supervisor:_run_task",
                    master_id=master_id) as eliot_task:
        eliot_task_id = eliot_task.serialize_task_id()

        # Have the master initialize its presence in redis.
        Message.log(message_type='creating master')
        redis_master = RedisMaster.create(master_id, eliot_task_id, rule_name)

        # Load the rule from its name. `run_task` has already checked that it's
        # there.
        rule = CheckerRule.query.filter(CheckerRule.name == rule_name).one()
        Message.log(message_type='loaded rule', rule_name=rule.name)

        # Create workers to attach to this master. `record_centric` means that
        # the task uses the `record` fixture, which causes pytest to loop over
        # it len(chunk_recids) times. This is important to know now so that we
        # will spawn multiple workers.
        subtasks = []
        record_centric = _get_record_fixture_presence(rule.filepath)

        if record_centric:
            # We wish to spawn multiple workers to split the load.
            if rule.allow_chunking:
                recid_chunks = tuple(
                    chunk_recids(rule.modified_requested_recids))
            else:
                recid_chunks = (rule.modified_requested_recids, )
            Message.log(message_type='creating subtasks',
                        count=len(recid_chunks),
                        mode='record_centric',
                        recid_count=len(rule.modified_requested_recids))
        else:
            # We wish to spawn just one worker that will run the check function
            # once.
            recid_chunks = (set(), )
            Message.log(message_type='creating subtasks',
                        count=1,
                        mode='not_record_centric')

        # Create the subtasks based on the decisions taken above and inform the
        # master of its associations with these new workers/tasks.
        for chunk in recid_chunks:
            task_id = uuid()
            redis_master.workers_append(task_id)
            subtasks.append(
                create_celery_task(task_id, redis_master.master_id, rule,
                                   chunk, eliot_task))

        if not subtasks:
            # Note that if `record-centric` is True, there's the chance that no
            # records matched our query. This does not imply a problem.
            redis_master.status = StatusMaster.completed
        else:
            redis_master.status = StatusMaster.running
            # FIXME: handle_all_completion should be called after the callbacks
            # of all workers have completed.
            callback = handle_all_completion.subtask()
            chord(subtasks)(callback)
Example #21
    def create(
        cls,
        reactor,
        tahoe_venv,
        request,
        base_dir,
        introducer_furl,
        flog_gatherer,
        name,
        tahoe_web_port,
        magic_folder_web_port,
        storage,
    ):
        """
        Launch the two processes and return a new ``MagicFolderEnabledNode``
        referencing them.

        Note this depends on pytest/Twisted integration for magical blocking.

        :param reactor: The reactor to use to launch the processes.
        :param tahoe_venv: Directory where our virtualenv is located.
        :param request: The pytest request object to use for cleanup.
        :param bytes base_dir: A directory beneath which to place the
            Tahoe-LAFS node.
        :param bytes introducer_furl: The introducer fURL to configure the new
            Tahoe-LAFS node with.
        :param bytes flog_gatherer: The flog gatherer fURL to configure the
            new Tahoe-LAFS node with.
        :param bytes name: A nickname to assign the new Tahoe-LAFS node.
        :param bytes tahoe_web_port: An endpoint description of the web port
            for the new Tahoe-LAFS node to listen on.
        :param bytes magic_folder_web_port: An endpoint description of the web
            port for the new magic-folder process to listen on.
        :param bool storage: True if the node should offer storage, False
            otherwise.
        """
        with start_task(action_type=u"integration:magic-folder-node",
                        node=name).context() as action:
            # We want this action to last until the session fixture using it
            # ends (so we can capture output from every process associated
            # with this node). Thus we use `.context()` above so this
            # with-block doesn't finish the action, and add a finalizer to
            # finish it (first, since finalizers are a stack).
            request.addfinalizer(action.finish)
            # Make the Tahoe-LAFS node process
            tahoe = yield _create_node(
                reactor,
                tahoe_venv,
                request,
                base_dir,
                introducer_furl,
                flog_gatherer,
                name,
                tahoe_web_port,
                storage,
                needed=1,
                happy=1,
                total=1,
            )
            yield await_client_ready(reactor, tahoe)

            # Create the magic-folder daemon config
            yield _init_magic_folder(
                reactor,
                request,
                base_dir,
                name,
                magic_folder_web_port,
            )

            # Run the magic-folder daemon
            magic_folder = yield _run_magic_folder(
                reactor,
                request,
                base_dir,
                name,
            )

        returnValue(
            cls(
                reactor,
                request,
                base_dir,
                name,
                action,
                tahoe,
                magic_folder,
                magic_folder_web_port,
            ))
Example #22
def parse(json_filepath: str, csv_filepath: str):
    with start_task(action_type="parse_departments"):
        departments = parse_departments(csv_filepath)
        write_departments_json(departments, json_filepath)
Example #23
def _run_task(rule_name, master_id):

    # # If you find yourself debugging celery crashes:
    # redis_master = None
    # def cleanup_session():
    #     if redis_master is not None:
    #         redis_master.zap()
    # def sigint_hook(rcv_signal, frame):
    #     cleanup_session()
    # def except_hook(type_, value, tback):
    #     cleanup_session()
    #     reraise(type_, value, tback)
    # signal.signal(signal.SIGINT, sigint_hook)
    # signal.signal(signal.SIGTERM, sigint_hook)
    # sys.excepthook = except_hook

    clear_logger_destinations(Logger)
    to_file(get_eliot_log_file(master_id=master_id))
    with start_task(action_type="invenio_checker:supervisor:_run_task",
                    master_id=master_id) as eliot_task:
        eliot_task_id = eliot_task.serialize_task_id()

        # Have the master initialize its presence in redis.
        Message.log(message_type='creating master')
        redis_master = RedisMaster.create(master_id, eliot_task_id, rule_name)

        # Load the rule from its name. `run_task` has already checked that it's
        # there.
        rule = CheckerRule.query.filter(CheckerRule.name == rule_name).one()
        Message.log(message_type='loaded rule', rule_name=rule.name)

        # Create workers to attach to this master. `record_centric` means that
        # the task uses the `record` fixture, which causes pytest to loop over
        # it len(chunk_recids) times. This is important to know now so that we
        # will spawn multiple workers.
        subtasks = []
        record_centric = _get_record_fixture_presence(rule.filepath)

        if record_centric:
            # We wish to spawn multiple workers to split the load.
            if rule.allow_chunking:
                recid_chunks = tuple(chunk_recids(rule.modified_requested_recids))
            else:
                recid_chunks = (rule.modified_requested_recids,)
            Message.log(message_type='creating subtasks', count=len(recid_chunks),
                        mode='record_centric', recid_count=len(rule.modified_requested_recids))
        else:
            # We wish to spawn just one worker that will run the check function
            # once.
            recid_chunks = (set(),)
            Message.log(message_type='creating subtasks', count=1,
                        mode='not_record_centric')

        # Create the subtasks based on the decisions taken above and inform the
        # master of its associations with these new workers/tasks.
        for chunk in recid_chunks:
            task_id = uuid()
            redis_master.workers_append(task_id)
            subtasks.append(create_celery_task(task_id, redis_master.master_id,
                                               rule, chunk, eliot_task))

        if not subtasks:
            # Note that if `record-centric` is True, there's the chance that no
            # records matched our query. This does not imply a problem.
            redis_master.status = StatusMaster.completed
        else:
            redis_master.status = StatusMaster.running
            # FIXME: handle_all_completion should be called after the callbacks
            # of all workers have completed.
            callback = handle_all_completion.subtask()
            chord(subtasks)(callback)
def main(csv_filepath: str):
    with start_task(action_type="create_users", csv_filepath=csv_filepath):
        users_to_create = prepare_user_data(csv_filepath)
        create_keycloak_users(users_to_create)
def _run_task(rule_name, master_id):
    del Logger._destinations._destinations[:]
    to_file(open(os.path.join(eliot_log_path, master_id), "ab"))

    with start_task(action_type="invenio_checker:supervisor:_run_task",
                    master_id=master_id) as eliot_task:
        from .models import CheckerRule
        # cleanup_failed_runs()

        redis_master = None

        def cleanup_session():
            print 'Cleaning up'
            if redis_master is not None:
                redis_master.zap()

        def sigint_hook(rcv_signal, frame):
            cleanup_session()

        def except_hook(type_, value, tback):
            cleanup_session()
            reraise(type_, value, tback)

        signal.signal(signal.SIGINT, sigint_hook)
        signal.signal(signal.SIGTERM, sigint_hook)
        sys.excepthook = except_hook

        with start_action(action_type='create master'):
            eliot_task_id = eliot_task.serialize_task_id()
            redis_master = RedisMaster(master_id, eliot_task_id, rule_name)

        with start_action(action_type='create subtasks'):
            rules = CheckerRule.from_ids((rule_name, ))
            bundles = rules_to_bundles(rules, redis_master.all_recids)

            subtasks = []
            errback = handle_error.s()
            for rule, rule_chunks in bundles.iteritems():
                for chunk in rule_chunks:
                    task_id = uuid()
                    redis_master.workers_append(task_id)
                    eliot_task_id = eliot_task.serialize_task_id()
                    RedisWorker(task_id, eliot_task_id, chunk)
                    subtasks.append(
                        run_test.subtask(args=(rule.filepath,
                                               redis_master.master_id,
                                               task_id),
                                         task_id=task_id,
                                         link_error=[errback]))

            Message.log(message_type='registered subtasks',
                        value=str(redis_master.workers))

        with start_action(action_type='run chord'):
            redis_master.status = StatusMaster.running
            header = subtasks
            callback = handle_results.subtask(
                link_error=[handle_errors.s(redis_master.master_id)])
            my_chord = chord(header)
            result = my_chord(callback)
            redis_master.status = StatusMaster.running
Example #26
def eliot_log_test(request):
    with start_task(action_type="integration:pytest", test=str(request.node.nodeid)):
        yield