async def activity_trigger(
        *,
        lifecycle: lifecycles.LifeCycleFn,
        registry: registries.OperatorRegistry,
        activity: causation.Activity,
) -> Mapping[registries.HandlerId, registries.HandlerResult]:
    """
    Execute a handling cycle until succeeded or permanently failed.

    This mimics the behaviour of patching-watching in Kubernetes, but in-memory.
    """
    logger = logging.getLogger(f'kopf.activities.{activity.value}')

    # For the activity handlers, we have neither bodies, nor patches, just the state.
    cause = causation.ActivityCause(logger=logger, activity=activity)
    handlers = registry.get_activity_handlers(activity=activity)
    state = states.State.from_scratch(handlers=handlers)
    latest_outcomes: MutableMapping[registries.HandlerId, states.HandlerOutcome] = {}
    while not state.done:
        outcomes = await _execute_handlers(
            lifecycle=lifecycle,
            handlers=handlers,
            cause=cause,
            state=state,
        )
        latest_outcomes.update(outcomes)
        state = state.with_outcomes(outcomes)
        delay = state.delay
        if delay:
            await sleeping.sleep_or_wait(
                min(delay, WAITING_KEEPALIVE_INTERVAL), asyncio.Event())

    # Activities assume that all handlers must eventually succeed.
    # We raise from the 1st exception only: just to have something real in the tracebacks.
    # For multiple handlers' errors, the logs should be investigated instead.
    exceptions = [
        outcome.exception
        for outcome in latest_outcomes.values()
        if outcome.exception is not None
    ]
    if exceptions:
        raise ActivityError("One or more handlers failed.",
                            outcomes=latest_outcomes) from exceptions[0]

    # If nothing has failed, we return identifiable results. The outcomes/states are internal.
    # The order of results is not guaranteed (the handlers can succeed on one of the retries).
    results = {
        handler_id: outcome.result
        for handler_id, outcome in latest_outcomes.items()
        if outcome.result is not None
    }
    return results
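For illustration, a minimal sketch of how a handler experiences this retry cycle from the outside, using only the public kopf API (kopf.on.startup, kopf.TemporaryError); the config path and function name below are hypothetical:

import os

import kopf

CONFIG_PATH = '/etc/operator/config.yaml'  # hypothetical path, for illustration only


@kopf.on.startup(retries=3, backoff=10)
def wait_for_config(logger, retry, **_):
    # A TemporaryError marks the handler as not-yet-succeeded; the cycle above
    # keeps its state, sleeps for the requested delay, and re-executes it.
    if not os.path.exists(CONFIG_PATH):
        raise kopf.TemporaryError("The config is not mounted yet.", delay=10)
    logger.info("Config found on retry #%s.", retry)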
def test_on_probe_with_all_kwargs(mocker):
    registry = OperatorRegistry()

    @kopf.on.probe(
        id='id', registry=registry,
        errors=ErrorsMode.PERMANENT, timeout=123, retries=456, backoff=78)
    def fn(**_):
        pass

    handlers = registry.get_activity_handlers(activity=Activity.PROBE)
    assert len(handlers) == 1
    assert handlers[0].fn is fn
    assert handlers[0].activity == Activity.PROBE
    assert handlers[0].id == 'id'
    assert handlers[0].errors == ErrorsMode.PERMANENT
    assert handlers[0].timeout == 123
    assert handlers[0].retries == 456
    assert handlers[0].backoff == 78
    assert handlers[0].cooldown == 78  # deprecated alias
def test_on_cleanup_with_all_kwargs(mocker):
    registry = OperatorRegistry()

    @kopf.on.cleanup(
        id='id', registry=registry,
        errors=ErrorsMode.PERMANENT, timeout=123, retries=456, backoff=78)
    def fn(**_):
        pass

    with pytest.deprecated_call(match=r"use registry.activity_handlers"):
        handlers = registry.get_activity_handlers(activity=Activity.CLEANUP)
    assert len(handlers) == 1
    assert handlers[0].fn is fn
    assert handlers[0].activity == Activity.CLEANUP
    assert handlers[0].id == 'id'
    assert handlers[0].errors == ErrorsMode.PERMANENT
    assert handlers[0].timeout == 123
    assert handlers[0].retries == 456
    assert handlers[0].backoff == 78
def test_on_startup_with_all_kwargs(mocker):
    registry = OperatorRegistry()

    @kopf.on.startup(
        id='id', registry=registry,
        errors=ErrorsMode.PERMANENT, timeout=123, retries=456, backoff=78)
    def fn(**_):
        pass

    handlers = registry.get_activity_handlers(activity=Activity.STARTUP)
    assert len(handlers) == 1
    assert handlers[0].fn is fn
    assert handlers[0].activity == Activity.STARTUP
    assert handlers[0].id == 'id'
    assert handlers[0].errors == ErrorsMode.PERMANENT
    assert handlers[0].timeout == 123
    assert handlers[0].retries == 456
    assert handlers[0].backoff == 78
def test_on_probe_with_all_kwargs(mocker):
    registry = OperatorRegistry()

    @kopf.on.probe(
        id='id', registry=registry,
        errors=ErrorsMode.PERMANENT, timeout=123, retries=456, backoff=78)
    def fn(**_):
        pass

    with pytest.deprecated_call(match=r"cease using the internal registries"):
        handlers = registry.get_activity_handlers(activity=Activity.PROBE)
    assert len(handlers) == 1
    assert handlers[0].fn is fn
    assert handlers[0].activity == Activity.PROBE
    assert handlers[0].id == 'id'
    assert handlers[0].errors == ErrorsMode.PERMANENT
    assert handlers[0].timeout == 123
    assert handlers[0].retries == 456
    assert handlers[0].backoff == 78
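Outside of the tests, the same decorators are normally applied without an explicit registry argument, so the handlers land in the default registry; a brief hedged sketch (the handler names and the returned probe value are illustrative):

import kopf


@kopf.on.startup(errors=kopf.ErrorsMode.PERMANENT, timeout=123, retries=456, backoff=78)
def configure(logger, **_):
    logger.info("Operator is starting up.")


@kopf.on.probe(id='health')
def health_probe(**_):
    # Probe results are reported via the operator's liveness endpoint.
    return 'ok'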
async def run_activity(
        *,
        lifecycle: lifecycles.LifeCycleFn,
        registry: registries.OperatorRegistry,
        activity: causation.Activity,
) -> Mapping[handlers.HandlerId, callbacks.HandlerResult]:
    logger = logging.getLogger(f'kopf.activities.{activity.value}')

    # For the activity handlers, we have neither bodies, nor patches, just the state.
    cause = causation.ActivityCause(logger=logger, activity=activity)
    handlers = registry.get_activity_handlers(activity=activity)
    outcomes = await handling.run_handlers_until_done(
        cause=cause,
        handlers=handlers,
        lifecycle=lifecycle,
    )

    # Activities assume that all handlers must eventually succeed.
    # We raise from the 1st exception only: just to have something real in the tracebacks.
    # For multiple handlers' errors, the logs should be investigated instead.
    exceptions = [
        outcome.exception
        for outcome in outcomes.values()
        if outcome.exception is not None
    ]
    if exceptions:
        raise ActivityError("One or more handlers failed.",
                            outcomes=outcomes) from exceptions[0]

    # If nothing has failed, we return identifiable results. The outcomes/states are internal.
    # The order of results is not guaranteed (the handlers can succeed on one of the retries).
    results = {
        handler_id: outcome.result
        for handler_id, outcome in outcomes.items()
        if outcome.result is not None
    }
    return results
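And a hedged sketch of how a caller might drive run_activity itself, reusing only the names visible in the signatures above; the import paths and the all_at_once lifecycle are assumptions about the surrounding package layout:

# Assumed import paths; the exact module layout may differ.
from kopf.reactor import causation, lifecycles, registries
from kopf.reactor.activities import run_activity


async def run_startup(registry: registries.OperatorRegistry) -> None:
    # On failure, ActivityError is raised from the first handler exception,
    # with all per-handler outcomes attached for inspection.
    results = await run_activity(
        lifecycle=lifecycles.all_at_once,  # assumed: any LifeCycleFn is acceptable here
        registry=registry,
        activity=causation.Activity.STARTUP,
    )
    # Results are keyed by handler id; handlers that returned None are omitted.
    for handler_id, result in results.items():
        print(f'{handler_id}: {result!r}')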