Example #1
async def test_continuation_when_overdue(clock, sleep):
    wakeup = asyncio.Event()
    logger = logging.getLogger()
    throttler = Throttler()

    clock.return_value = 1000  # simulated "now"
    sleep.return_value = 55  # simulated sleep time left
    async with throttled(throttler=throttler,
                         logger=logger,
                         delays=[123, 234],
                         wakeup=wakeup):
        raise Exception()

    sleep.reset_mock()
    clock.return_value = 2000  # simulated "now"
    sleep.return_value = None  # simulated sleep time left
    async with throttled(throttler=throttler,
                         logger=logger,
                         delays=[...],
                         wakeup=wakeup):
        raise Exception()

    assert throttler.last_used_delay == 234
    assert throttler.source_of_delays is not None
    assert throttler.active_until is None  # means: no sleep time is left
    assert sleep.mock_calls == [
        call(1123 - 2000, wakeup=wakeup),
        call(234, wakeup=wakeup)
    ]
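These tests (above and in the examples that follow) rely on clock and sleep pytest fixtures that replace the throttler's monotonic clock and its interruptible sleep with mocks, so that active_until and the simulated "sleep time left" are fully controlled. Below is a minimal sketch of the assumed imports and fixtures: the import path and patch targets are illustrative assumptions that vary between kopf versions, the mocker fixture comes from the pytest-mock plugin, and the exc_cls/kwargs arguments in Examples #9 and #11 come from pytest.mark.parametrize decorators that are omitted here.

import asyncio
import logging
from unittest.mock import AsyncMock, MagicMock, call

import pytest

# Assumed import path: the throttling helpers live in different modules
# across kopf versions; adjust to the version under test.
from kopf._core.actions.throttlers import Throttler, throttled


@pytest.fixture()
def clock(mocker):
    # The monotonic clock used to compute Throttler.active_until, replaced
    # with a mock so the tests can set the simulated "now" explicitly.
    return mocker.patch('time.monotonic', MagicMock(return_value=0))


@pytest.fixture()
def sleep(mocker):
    # The interruptible sleep used by throttled(), replaced with an async mock.
    # Its return value simulates the unslept time: a number means the sleep was
    # interrupted early, None means it slept through. The patch target is assumed.
    return mocker.patch('kopf._core.actions.throttlers.aiotime.sleep',
                        AsyncMock(return_value=None))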
Example #2
async def test_recommends_running_immediately_after_continued(sleep):
    logger = logging.getLogger()
    throttler = Throttler()

    sleep.return_value = 33  # simulated sleep time left
    async with throttled(throttler=throttler, logger=logger, delays=[123]):
        raise Exception()

    sleep.return_value = None  # simulated sleep time left
    async with throttled(throttler=throttler, logger=logger,
                         delays=[...]) as should_run:
        remembered_should_run = should_run

    assert remembered_should_run is True
Example #3
async def test_recommends_skipping_immediately_after_interrupted_error(sleep):
    logger = logging.getLogger()
    throttler = Throttler()

    sleep.return_value = 33  # simulated sleep time left
    async with throttled(throttler=throttler, logger=logger, delays=[123]):
        raise Exception()

    sleep.return_value = 33  # simulated sleep time left
    async with throttled(throttler=throttler, logger=logger,
                         delays=[...]) as should_run:
        remembered_should_run = should_run

    assert remembered_should_run is False
Example #4
async def test_resets_on_success(sleep):
    logger = logging.getLogger()
    throttler = Throttler()

    async with throttled(throttler=throttler, logger=logger, delays=[123]):
        raise Exception()

    sleep.reset_mock()
    async with throttled(throttler=throttler, logger=logger, delays=[...]):
        pass

    assert throttler.last_used_delay is None
    assert throttler.source_of_delays is None
    assert throttler.active_until is None
    assert sleep.mock_calls == []
Example #5
async def test_recommends_running_initially():
    logger = logging.getLogger()
    throttler = Throttler()
    async with throttled(throttler=throttler, logger=logger,
                         delays=[123]) as should_run:
        remembered_should_run = should_run
    assert remembered_should_run is True
Example #6
async def test_remains_inactive_on_success():
    logger = logging.getLogger()
    throttler = Throttler()
    async with throttled(throttler=throttler, logger=logger, delays=[123]):
        pass
    assert throttler.source_of_delays is None
    assert throttler.last_used_delay is None
Example #7
async def test_logging_when_deactivates_on_reentry(sleep, caplog):
    caplog.set_level(0)
    logger = logging.getLogger()
    throttler = Throttler()

    sleep.return_value = 55  # simulated sleep time left
    async with throttled(throttler=throttler, logger=logger, delays=[123]):
        raise Exception("boo!")

    sleep.return_value = None  # simulated sleep time left
    async with throttled(throttler=throttler, logger=logger, delays=[...]):
        pass

    assert caplog.messages == [
        "Throttling for 123 seconds due to an unexpected error: Exception('boo!')",
        "Throttling is over. Switching back to normal operations.",
    ]
Example #8
async def test_sleeps_for_the_next_delay_when_active(sleep):
    logger = logging.getLogger()
    throttler = Throttler()

    async with throttled(throttler=throttler, logger=logger, delays=[123,
                                                                     234]):
        raise Exception()

    sleep.reset_mock()
    async with throttled(throttler=throttler, logger=logger, delays=[...]):
        raise Exception()

    assert throttler.last_used_delay == 234
    assert throttler.source_of_delays is not None
    assert next(throttler.source_of_delays, 999) == 999

    assert throttler.active_until is None  # means: no sleep time left
    assert sleep.mock_calls == [call(234, wakeup=None)]
Example #9
async def test_escalates_unexpected_errors(exc_cls, kwargs):
    logger = logging.getLogger()
    throttler = Throttler()
    with pytest.raises(exc_cls):
        async with throttled(throttler=throttler,
                             logger=logger,
                             delays=[123],
                             **kwargs):
            raise exc_cls()
Example #10
async def test_renews_on_repeated_failure(sleep):
    logger = logging.getLogger()
    throttler = Throttler()

    async with throttled(throttler=throttler, logger=logger, delays=[123]):
        raise Exception()

    async with throttled(throttler=throttler, logger=logger, delays=[...]):
        pass

    sleep.reset_mock()
    async with throttled(throttler=throttler, logger=logger, delays=[234]):
        raise Exception()

    assert throttler.last_used_delay == 234
    assert throttler.source_of_delays is not None
    assert throttler.active_until is None
    assert sleep.mock_calls == [call(234, wakeup=None)]
Example #11
async def test_activates_on_expected_errors(exc_cls, kwargs):
    logger = logging.getLogger()
    throttler = Throttler()
    async with throttled(throttler=throttler,
                         logger=logger,
                         delays=[123],
                         **kwargs):
        raise exc_cls()
    assert throttler.source_of_delays is not None
    assert throttler.last_used_delay is not None
Example #12
async def test_logging_when_deactivates_immediately(caplog):
    caplog.set_level(0)
    logger = logging.getLogger()
    throttler = Throttler()

    async with throttled(throttler=throttler, logger=logger, delays=[123]):
        raise Exception("boo!")

    assert caplog.messages == [
        "Throttling for 123 seconds due to an unexpected error: Exception('boo!')",
        "Throttling is over. Switching back to normal operations.",
    ]
Example #13
async def test_skips_on_no_delays(sleep):
    logger = logging.getLogger()
    throttler = Throttler()

    async with throttled(throttler=throttler, logger=logger, delays=[]):
        raise Exception()

    assert throttler.last_used_delay is None
    assert throttler.source_of_delays is not None
    assert next(throttler.source_of_delays, 999) == 999

    assert throttler.active_until is None  # means: no sleep time left
    assert sleep.mock_calls == []
Example #14
async def test_interruption(clock, sleep):
    wakeup = asyncio.Event()
    logger = logging.getLogger()
    throttler = Throttler()

    clock.return_value = 1000  # simulated "now"
    sleep.return_value = 55  # simulated sleep time left
    async with throttled(throttler=throttler,
                         logger=logger,
                         delays=[123, 234],
                         wakeup=wakeup):
        raise Exception()

    assert throttler.last_used_delay == 123
    assert throttler.source_of_delays is not None
    assert throttler.active_until == 1123  # means: some sleep time is left
    assert sleep.mock_calls == [call(123, wakeup=wakeup)]
Example #15
async def process_resource_event(
        lifecycle: execution.LifeCycleFn,
        indexers: indexing.OperatorIndexers,
        registry: registries.OperatorRegistry,
        settings: configuration.OperatorSettings,
        memories: inventory.ResourceMemories,
        memobase: ephemera.AnyMemo,
        resource: references.Resource,
        raw_event: bodies.RawEvent,
        event_queue: posting.K8sEventQueue,
        stream_pressure: Optional[asyncio.Event] = None,  # None for tests
        resource_indexed: Optional[aiotoggles.Toggle] = None,  # None for tests & observation
        operator_indexed: Optional[aiotoggles.ToggleSet] = None,  # None for tests & observation
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.
    """

    # Recall what is stored about that object. Share it in little portions with the consumers.
    # And immediately forget it if the object is deleted from the cluster (but keep in memory).
    raw_type, raw_body = raw_event['type'], raw_event['object']
    memory = await memories.recall(raw_body, noticed_by_listing=raw_type is None, memobase=memobase)
    if memory.daemons_memory.live_fresh_body is not None:
        memory.daemons_memory.live_fresh_body._replace_with(raw_body)
    if raw_type == 'DELETED':
        await memories.forget(raw_body)

    # Convert to a heavy mapping-view wrapper only now, when heavy processing begins.
    # Raw-event streaming, queueing, and batching use regular lightweight dicts.
    # Why here? 1. Before it splits into multiple causes & handlers for the same object's body;
    # 2. After it is batched (queueing); 3. While the "raw" parsed JSON is still known;
    # 4. Same as where a patch object of a similar wrapping semantics is created.
    live_fresh_body = memory.daemons_memory.live_fresh_body
    body = live_fresh_body if live_fresh_body is not None else bodies.Body(raw_body)
    patch = patches.Patch()

    # Different loggers for different cases with different verbosity and exposure.
    local_logger = loggers.LocalObjectLogger(body=body, settings=settings)
    terse_logger = loggers.TerseObjectLogger(body=body, settings=settings)
    event_logger = loggers.ObjectLogger(body=body, settings=settings)

    # Throttle the non-handler-related errors. The regular event watching/batching continues
    # to prevent queue overfilling, but the processing is skipped (events are ignored).
    # Choice of place: late enough to have a per-resource memory for a throttler; also, a logger.
    # But early enough to catch environment errors from K8s API, and from most of the complex code.
    async with throttlers.throttled(
        throttler=memory.error_throttler,
        logger=local_logger,
        delays=settings.batching.error_delays,
        wakeup=stream_pressure,
    ) as should_run:
        if should_run:

            # Each object has its own prefixed logger, to distinguish parallel handling.
            posting.event_queue_loop_var.set(asyncio.get_running_loop())
            posting.event_queue_var.set(event_queue)  # till the end of this object's task.

            # [Pre-]populate the indices. This must be lightweight.
            await indexing.index_resource(
                registry=registry,
                indexers=indexers,
                settings=settings,
                resource=resource,
                raw_event=raw_event,
                body=body,
                memo=memory.memo,
                memory=memory.indexing_memory,
                logger=terse_logger,
            )

            # Wait for all other individual resources and all other resource kinds' lists to finish.
            # If this one has changed while waiting for the global readiness, let it be reprocessed.
            if operator_indexed is not None and resource_indexed is not None:
                await operator_indexed.drop_toggle(resource_indexed)
            if operator_indexed is not None:
                await operator_indexed.wait_for(True)  # other resource kinds & objects.
            if stream_pressure is not None and stream_pressure.is_set():
                return

            # Do the magic -- do the job.
            delays, matched = await process_resource_causes(
                lifecycle=lifecycle,
                indexers=indexers,
                registry=registry,
                settings=settings,
                resource=resource,
                raw_event=raw_event,
                body=body,
                patch=patch,
                memory=memory,
                local_logger=local_logger,
                event_logger=event_logger,
            )

            # Whatever was done, apply the accumulated changes to the object, or sleep-n-touch for delays.
            # But only once, to reduce the number of API calls and the generated irrelevant events.
            # And only if the object is at least supposed to exist (not "GONE"), even if it actually does not.
            if raw_event['type'] != 'DELETED':
                applied = await application.apply(
                    settings=settings,
                    resource=resource,
                    body=body,
                    patch=patch,
                    logger=local_logger,
                    delays=delays,
                    stream_pressure=stream_pressure,
                )
                if applied and matched:
                    local_logger.debug("Handling cycle is finished, waiting for new changes.")