Example #1
0
def test_get_start_time(handler, expected, body, patch):
    """Check that get_start_time() yields the expected value and leaves its inputs intact."""
    body_snapshot = copy.deepcopy(body)
    patch_snapshot = copy.deepcopy(patch)

    result = get_start_time(body=body, patch=patch, handler=handler)

    assert result == expected
    assert body == body_snapshot  # not modified
    assert patch == patch_snapshot  # not modified
Example #2
0
async def _execute(
    lifecycle: Callable,
    handlers: Collection[registries.Handler],
    cause: causation.Cause,
    retry_on_errors: bool = True,
) -> None:
    """
    Call the next handler(s) from the chain of the handlers.

    Keep the record on the progression of the handlers in the object's status,
    and use it on the next invocation to determine which handler(s) to call.

    This routine is used both for the global handlers (via global registry),
    and for the sub-handlers (via a simple registry of the current handler).

    Raises `HandlerChildrenRetry` if there are children handlers to be executed
    on the next call, and implicitly provokes such a call by making the changes
    to the status fields (on the handler progression and number of retries).

    Exits normally if all handlers for this cause are fully done.
    """
    logger = cause.logger

    # Filter and select the handlers to be executed right now, on this event reaction cycle.
    handlers_wait = [
        h for h in handlers if status.is_sleeping(body=cause.body, handler=h)
    ]
    handlers_todo = [
        h for h in handlers if status.is_awakened(body=cause.body, handler=h)
    ]

    # The lifecycle callback selects which of the awakened handlers run in this cycle.
    handlers_plan = list(
        await invocation.invoke(lifecycle, handlers_todo, cause=cause))

    # Awakened handlers not selected by the lifecycle must be retried later.
    # NOTE: the id-set is built once here instead of per-handler (was O(n^2)).
    planned_ids = {handler.id for handler in handlers_plan}
    handlers_left = [h for h in handlers_todo if h.id not in planned_ids]

    # Set the timestamps -- even if not executed on this event, but just got registered.
    for handler in handlers:
        if not status.is_started(body=cause.body, handler=handler):
            status.set_start_time(body=cause.body,
                                  patch=cause.patch,
                                  handler=handler)

    # Execute all planned (selected) handlers in one event reaction cycle, even if there are few.
    for handler in handlers_plan:

        # Restore the handler's progress status. It can be useful in the handlers.
        retry = status.get_retry_count(body=cause.body, handler=handler)
        started = status.get_start_time(body=cause.body,
                                        handler=handler,
                                        patch=cause.patch)
        runtime = datetime.datetime.utcnow() - started

        # The exceptions are handled locally and are not re-raised, to keep the operator running.
        try:
            logger.debug(f"Invoking handler {handler.id!r}.")

            # Enforce the per-handler timeout across retries (wall-clock since first start).
            if handler.timeout is not None and runtime.total_seconds(
            ) > handler.timeout:
                raise HandlerTimeoutError(
                    f"Handler {handler.id!r} has timed out after {runtime}.")

            result = await _call_handler(
                handler,
                cause=cause,
                retry=retry,
                started=started,
                runtime=runtime,
                lifecycle=
                lifecycle,  # just a default for the sub-handlers, not used directly.
            )

        # Unfinished children cause the regular retry, but with less logging and event reporting.
        except HandlerChildrenRetry as e:
            logger.info(
                f"Handler {handler.id!r} has unfinished sub-handlers. Will retry soon."
            )
            status.set_retry_time(body=cause.body,
                                  patch=cause.patch,
                                  handler=handler,
                                  delay=e.delay)
            handlers_left.append(handler)

        # Definitely retriable error, no matter what is the error-reaction mode.
        except HandlerRetryError as e:
            logger.exception(
                f"Handler {handler.id!r} failed with a retry exception. Will retry."
            )
            await events.exception_async(
                cause.body,
                message=f"Handler {handler.id!r} failed. Will retry.")
            status.set_retry_time(body=cause.body,
                                  patch=cause.patch,
                                  handler=handler,
                                  delay=e.delay)
            handlers_left.append(handler)

        # Definitely fatal error, no matter what is the error-reaction mode.
        except HandlerFatalError as e:
            logger.exception(
                f"Handler {handler.id!r} failed with a fatal exception. Will stop."
            )
            await events.exception_async(
                cause.body,
                message=f"Handler {handler.id!r} failed. Will stop.")
            status.store_failure(body=cause.body,
                                 patch=cause.patch,
                                 handler=handler,
                                 exc=e)
            # TODO: report the handling failure somehow (beside logs/events). persistent status?

        # Regular errors behave as either retriable or fatal depending on the error-reaction mode.
        except Exception as e:
            if retry_on_errors:
                logger.exception(
                    f"Handler {handler.id!r} failed with an exception. Will retry."
                )
                await events.exception_async(
                    cause.body,
                    message=f"Handler {handler.id!r} failed. Will retry.")
                status.set_retry_time(body=cause.body,
                                      patch=cause.patch,
                                      handler=handler,
                                      delay=DEFAULT_RETRY_DELAY)
                handlers_left.append(handler)
            else:
                logger.exception(
                    f"Handler {handler.id!r} failed with an exception. Will stop."
                )
                await events.exception_async(
                    cause.body,
                    message=f"Handler {handler.id!r} failed. Will stop.")
                status.store_failure(body=cause.body,
                                     patch=cause.patch,
                                     handler=handler,
                                     exc=e)
                # TODO: report the handling failure somehow (beside logs/events). persistent status?

        # No errors means the handler should be excluded from future runs in this reaction cycle.
        else:
            logger.info(f"Handler {handler.id!r} succeeded.")
            await events.info_async(
                cause.body,
                reason='Success',
                message=f"Handler {handler.id!r} succeeded.")
            status.store_success(body=cause.body,
                                 patch=cause.patch,
                                 handler=handler,
                                 result=result)

    # Provoke the retry of the handling cycle if there were any unfinished handlers,
    # either because they were not selected by the lifecycle, or failed and need a retry.
    if handlers_left:
        raise HandlerChildrenRetry(delay=None)

    # If there are delayed handlers, block this object's cycle; but do keep-alives every few mins.
    # Other (non-delayed) handlers will continue as normally, due to raise few lines above.
    # Other objects will continue as normally in their own handling asyncio tasks.
    if handlers_wait:
        times = [
            status.get_awake_time(body=cause.body, handler=handler)
            for handler in handlers_wait
        ]
        until = min(times)  # the soonest awake datetime.
        delay = (until - datetime.datetime.utcnow()).total_seconds()
        delay = max(0, min(WAITING_KEEPALIVE_INTERVAL, delay))
        raise HandlerChildrenRetry(delay=delay)