Code example #1
async def touch(
    *,
    identity: Identity,
    settings: configuration.OperatorSettings,
    resource: references.Resource,
    namespace: references.Namespace,
    lifetime: Optional[int] = None,
) -> None:
    name = settings.peering.name
    peer = Peer(
        identity=identity,
        priority=settings.peering.priority,
        lifetime=settings.peering.lifetime if lifetime is None else lifetime,
    )

    patch = patches.Patch()
    patch.update(
        {'status': {
            identity: None if peer.is_dead else peer.as_dict()
        }})
    rsp = await patching.patch_obj(
        settings=settings,
        resource=resource,
        namespace=namespace,
        name=name,
        patch=patch,
        logger=logger,
    )

    if not settings.peering.stealth or rsp is None:
        where = f"in {namespace!r}" if namespace else "cluster-wide"
        result = "not found" if rsp is None else "ok"
        logger.debug(f"Keep-alive in {name!r} {where}: {result}.")
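
A minimal usage sketch (the loop, interval, and shutdown handling below are illustrative, not kopf's actual peering scheduler; the unqualified `logger` above is presumably the module-level logger of the peering module):

import asyncio

async def keepalive_sketch(*, identity, settings, resource, namespace) -> None:
    # Hypothetical keep-alive loop: refresh the peering record well before
    # its lifetime expires, so this peer never looks dead while it is alive.
    try:
        while True:
            await touch(identity=identity, settings=settings,
                        resource=resource, namespace=namespace)
            await asyncio.sleep(max(1, int(settings.peering.lifetime / 3)))
    finally:
        # On shutdown, post a zero lifetime: the peer is then considered dead,
        # so touch() writes None and the record is removed from the object.
        await touch(identity=identity, settings=settings,
                    resource=resource, namespace=namespace, lifetime=0)
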
Code example #2
File: indexing.py Project: philipp-sontag-by/kopf
async def index_resource(
    *,
    indexers: OperatorIndexers,
    registry: registries.OperatorRegistry,
    settings: configuration.OperatorSettings,
    resource: references.Resource,
    raw_event: bodies.RawEvent,
    memory: IndexingMemory,
    logger: typedefs.Logger,
    memo: ephemera.AnyMemo,
    body: bodies.Body,
) -> None:
    """
    Populate the indices from the received event. Log but ignore all errors.

    This is a lightweight and standalone process, which is executed before
    any real handlers are invoked. Multi-step handling is not supported either:
    if a handler fails, it fails permanently and is never retried.

    Note: K8s-event posting is skipped for `kopf.on.event` handlers,
    as they should be silent. Still, the messages are logged normally.
    """
    if not registry._indexing.has_handlers(resource=resource):
        pass
    elif raw_event['type'] == 'DELETED':
        # Do not index it if it is deleted. Just discard quickly (ASAP!).
        indexers.discard(body=body)
    else:
        # Otherwise, go for full indexing with handlers invocation with all kwargs.
        cause = causes.IndexingCause(
            resource=resource,
            indices=indexers.indices,
            logger=logger,
            patch=patches.Patch(),  # NB: not applied. TODO: get rid of it!
            memo=memo,
            body=body,
        )

        # Note: the indexing state contains only failures & retries. Successes will be re-executed.
        indexing_handlers = registry._indexing.get_handlers(cause=cause)
        state = memory.indexing_state
        state = state if state is not None else progression.State.from_scratch()
        state = state.with_handlers(indexing_handlers)
        outcomes = await execution.execute_handlers_once(
            lifecycle=lifecycles.all_at_once,
            settings=settings,
            handlers=indexing_handlers,
            cause=cause,
            state=state,
            default_errors=execution.ErrorsMode.IGNORED,
        )
        indexers.replace(body=body, outcomes=outcomes)

        # Remember only failures & retries. Omit successes -- let them be re-executed every time.
        state = state.with_outcomes(outcomes).without_successes()
        memory.indexing_state = state if state else None
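
The machinery above backs handlers declared with `@kopf.index`. A minimal sketch of such a handler (the resource kind and the indexed fields are made up):

import kopf

@kopf.index('pods')
def pods_by_node(name: str, spec: kopf.Spec, **_):
    # Group pod names by the node they are scheduled on. The resulting
    # in-memory index is passed to other handlers as the `pods_by_node` kwarg.
    return {spec.get('nodeName'): name}
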
Code example #3
async def spawn_daemons(
    *,
    settings: configuration.OperatorSettings,
    handlers: Sequence[handlers_.SpawningHandler],
    daemons: MutableMapping[ids.HandlerId, Daemon],
    cause: causes.SpawningCause,
    memory: DaemonsMemory,
) -> Collection[float]:
    """
    Ensure that all daemons are spawned for this individual resource.

    This function can be called multiple times on multiple handling cycles
    (though usually it should be called on the first-seen occasion), so it
    must be idempotent: repeated calls must not duplicate its side effects.
    """
    if memory.live_fresh_body is None:  # for type-checking; "not None" is ensured in processing.
        raise RuntimeError(
            "A daemon is spawned with None as body. This is a bug. Please report."
        )
    for handler in handlers:
        if handler.id not in daemons:
            stopper = stoppers.DaemonStopper()
            daemon_cause = causes.DaemonCause(
                resource=cause.resource,
                indices=cause.indices,
                logger=cause.logger,
                memo=cause.memo,
                body=memory.live_fresh_body,
                patch=patches.Patch(),  # not the same as the one-shot spawning patch!
                stopper=stopper,  # for checking (passed to kwargs)
            )
            daemon = Daemon(
                stopper=stopper,  # for stopping (outside of causes)
                handler=handler,
                logger=loggers.LocalObjectLogger(body=cause.body,
                                                 settings=settings),
                task=aiotasks.create_task(
                    _runner(
                        settings=settings,
                        daemons=daemons,  # for self-garbage-collection
                        handler=handler,
                        cause=daemon_cause,
                        memory=memory,
                    ),
                    name=f'runner of {handler.id}'
                ),  # sometimes, daemons; sometimes, timers.
            )
            daemons[handler.id] = daemon
    return []
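
For illustration, a daemon handler of the kind that `spawn_daemons` manages (the resource kind and the body are made up); its `stopped` kwarg is backed by the `DaemonStopper` created above:

import asyncio
import kopf

@kopf.daemon('kopfexamples')
async def monitor(stopped, logger, **_):
    # Runs for the whole lifetime of each matching object. The stopper is
    # set when the object is deleted or the operator exits; async daemons
    # are additionally cancelled via their task.
    while not stopped:
        logger.info("Still alive...")
        await asyncio.sleep(10)
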
Code example #4
async def clean(
    *,
    peers: Iterable[Peer],
    settings: configuration.OperatorSettings,
    resource: references.Resource,
    namespace: references.Namespace,
) -> None:
    name = settings.peering.name
    patch = patches.Patch()
    patch.update({'status': {peer.identity: None for peer in peers}})
    await patching.patch_obj(resource=resource,
                             namespace=namespace,
                             name=name,
                             patch=patch)
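
In a JSON merge patch, a `None`/`null` value deletes the key, so `clean` removes the given peers' records from the peering object in a single call. A sketch of the patch body it builds, with made-up identities:

# Hypothetical identities, for illustration only:
peers_to_remove = ['operator-a', 'operator-b']
patch_body = {'status': {identity: None for identity in peers_to_remove}}
# -> {'status': {'operator-a': None, 'operator-b': None}}
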
Code example #5
async def process_resource_event(
        lifecycle: execution.LifeCycleFn,
        indexers: indexing.OperatorIndexers,
        registry: registries.OperatorRegistry,
        settings: configuration.OperatorSettings,
        memories: inventory.ResourceMemories,
        memobase: ephemera.AnyMemo,
        resource: references.Resource,
        raw_event: bodies.RawEvent,
        event_queue: posting.K8sEventQueue,
        stream_pressure: Optional[asyncio.Event] = None,  # None for tests
        resource_indexed: Optional[aiotoggles.Toggle] = None,  # None for tests & observation
        operator_indexed: Optional[aiotoggles.ToggleSet] = None,  # None for tests & observation
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.
    """

    # Recall what is stored about that object. Share it in little portions with the consumers.
    # And immediately forget it if the object is deleted from the cluster
    # (the local reference remains usable for the rest of this handling cycle).
    raw_type, raw_body = raw_event['type'], raw_event['object']
    memory = await memories.recall(raw_body, noticed_by_listing=raw_type is None, memobase=memobase)
    if memory.daemons_memory.live_fresh_body is not None:
        memory.daemons_memory.live_fresh_body._replace_with(raw_body)
    if raw_type == 'DELETED':
        await memories.forget(raw_body)

    # Convert to a heavy mapping-view wrapper only now, when heavy processing begins.
    # Raw-event streaming, queueing, and batching use regular lightweight dicts.
    # Why here? 1. Before it splits into multiple causes & handlers for the same object's body;
    # 2. After it is batched (queueing); 3. While the "raw" parsed JSON is still known;
    # 4. Same as where a patch object of a similar wrapping semantics is created.
    live_fresh_body = memory.daemons_memory.live_fresh_body
    body = live_fresh_body if live_fresh_body is not None else bodies.Body(raw_body)
    patch = patches.Patch()

    # Different loggers for different cases with different verbosity and exposure.
    local_logger = loggers.LocalObjectLogger(body=body, settings=settings)
    terse_logger = loggers.TerseObjectLogger(body=body, settings=settings)
    event_logger = loggers.ObjectLogger(body=body, settings=settings)

    # Throttle the non-handler-related errors. The regular event watching/batching continues
    # to prevent queue overfilling, but the processing is skipped (events are ignored).
    # Choice of place: late enough to have a per-resource memory for a throttler; also, a logger.
    # But early enough to catch environment errors from K8s API, and from most of the complex code.
    async with throttlers.throttled(
        throttler=memory.error_throttler,
        logger=local_logger,
        delays=settings.batching.error_delays,
        wakeup=stream_pressure,
    ) as should_run:
        if should_run:

            # Each object has its own prefixed logger, to distinguish parallel handling.
            posting.event_queue_loop_var.set(asyncio.get_running_loop())
            posting.event_queue_var.set(event_queue)  # till the end of this object's task.

            # [Pre-]populate the indices. This must be lightweight.
            await indexing.index_resource(
                registry=registry,
                indexers=indexers,
                settings=settings,
                resource=resource,
                raw_event=raw_event,
                body=body,
                memo=memory.memo,
                memory=memory.indexing_memory,
                logger=terse_logger,
            )

            # Wait for all other individual resources and all other resource kinds' lists to finish.
            # If this one has changed while waiting for the global readiness, let it be reprocessed.
            if operator_indexed is not None and resource_indexed is not None:
                await operator_indexed.drop_toggle(resource_indexed)
            if operator_indexed is not None:
                await operator_indexed.wait_for(True)  # other resource kinds & objects.
            if stream_pressure is not None and stream_pressure.is_set():
                return

            # Do the magic -- do the job.
            delays, matched = await process_resource_causes(
                lifecycle=lifecycle,
                indexers=indexers,
                registry=registry,
                settings=settings,
                resource=resource,
                raw_event=raw_event,
                body=body,
                patch=patch,
                memory=memory,
                local_logger=local_logger,
                event_logger=event_logger,
            )

            # Whatever was done, apply the accumulated changes to the object, or sleep-n-touch for delays.
            # But only once, to reduce the number of API calls and the generated irrelevant events.
            # And only if the object is at least supposed to exist (not "GONE"), even if it actually does not.
            if raw_event['type'] != 'DELETED':
                applied = await application.apply(
                    settings=settings,
                    resource=resource,
                    body=body,
                    patch=patch,
                    logger=local_logger,
                    delays=delays,
                    stream_pressure=stream_pressure,
                )
                if applied and matched:
                    local_logger.debug("Handling cycle is finished, waiting for new changes.")
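
At the handler level, the same raw watch-events surface through low-level event handlers. A minimal sketch (the resource kind is illustrative):

import kopf

@kopf.on.event('pods')
def log_any_event(type, body, logger, **_):
    # `type` is the raw watch-event type: ADDED/MODIFIED/DELETED,
    # or None for objects listed on startup.
    logger.debug(f"Event {type or 'LISTED'} on {body['metadata']['name']}")
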
Code example #6
File: admission.py Project: mbeacom/kopf
async def serve_admission_request(
        # Required for all webhook servers, meaningless without it:
        request: reviews.Request,
        *,
        # Optional for webhook servers that can recognise this information:
        headers: Optional[Mapping[str, str]] = None,
        sslpeer: Optional[Mapping[str, Any]] = None,
        webhook: Optional[ids.HandlerId] = None,
        reason: Optional[causes.WebhookType] = None,  # TODO: undocumented: requires typing clarity!
        # Injected by partial() from spawn_tasks():
        settings: configuration.OperatorSettings,
        memories: MemoGetter,
        memobase: ephemera.AnyMemo,
        registry: registries.OperatorRegistry,
        insights: references.Insights,
        indices: ephemera.Indices,
) -> reviews.Response:
    """
    The actual and the only implementation of the `WebhookFn` protocol.

    This function is passed to all webhook servers/tunnels to be called
    whenever a new admission request is received.

    Some parameters are provided by the framework itself via partial binding,
    so that the resulting function matches the `WebhookFn` protocol. Other
    parameters are passed by the webhook servers when they call the function.
    """

    # Reconstruct the cause specially for web handlers.
    resource = find_resource(request=request, insights=insights)
    subresource = request.get('request', {}).get('subResource')
    operation = request.get('request', {}).get('operation')
    userinfo = request.get('request', {}).get('userInfo')
    new_body = request.get('request', {}).get('object')
    old_body = request.get('request', {}).get('oldObject')
    raw_body = new_body if new_body is not None else old_body
    if userinfo is None:
        raise MissingDataError("User info is missing from the admission request.")
    if raw_body is None:
        raise MissingDataError("Either old or new object is missing from the admission request.")

    memo = await memories.recall_memo(raw_body, memobase=memobase, ephemeral=operation=='CREATE')
    body = bodies.Body(raw_body)
    patch = patches.Patch(body=raw_body)
    warnings: List[str] = []
    cause = causes.WebhookCause(
        resource=resource,
        indices=indices,
        logger=loggers.LocalObjectLogger(body=body, settings=settings),
        patch=patch,
        memo=memo,
        body=body,
        userinfo=userinfo,
        warnings=warnings,
        operation=operation,
        subresource=subresource,
        dryrun=bool(request.get('request', {}).get('dryRun')),
        sslpeer=sslpeer if sslpeer is not None else {},  # ensure a mapping even if not provided.
        headers=headers if headers is not None else {},  # ensure a mapping even if not provided.
        webhook=webhook,
        reason=reason,
    )

    # Retrieve the handlers to be executed; maybe only one if the webhook server provides a hint.
    handlers_ = registry._webhooks.get_handlers(cause)
    state = progression.State.from_scratch().with_handlers(handlers_)
    outcomes = await execution.execute_handlers_once(
        lifecycle=lifecycles.all_at_once,
        settings=settings,
        handlers=handlers_,
        cause=cause,
        state=state,
        default_errors=execution.ErrorsMode.PERMANENT,
    )

    # Construct the response as per Kubernetes's conventions and expectations.
    response = build_response(
        request=request,
        outcomes=outcomes,
        warnings=warnings,
        jsonpatch=patch.as_json_patch(),
    )
    return response
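
A sketch of a validating handler that this function would execute (the resource kind and the rule are made up); warnings appended by the handler end up in the response built above:

import kopf

@kopf.on.validate('kopfexamples')
def ensure_owner_label(body, warnings, **_):
    # Raising kopf.AdmissionError becomes a denial in build_response();
    # appended warnings are returned to the client (shown by kubectl).
    if 'owner' not in (body.meta.get('labels') or {}):
        raise kopf.AdmissionError("An 'owner' label is required.", code=400)
    warnings.append("Admitted by the demo webhook.")
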
Code example #7
File: admission.py Project: mbeacom/kopf
async def configuration_manager(
        *,
        reason: causes.WebhookType,
        selector: references.Selector,
        registry: registries.OperatorRegistry,
        settings: configuration.OperatorSettings,
        insights: references.Insights,
        container: aiovalues.Container[reviews.WebhookClientConfig],
) -> None:
    """
    Manage the webhook configurations dynamically.

    This is one of an operator's root tasks that run forever.
    If it exits, the whole operator exits as if on an error.

    The manager waits for changes in one of these:

    * Observed resources in the cluster (via insights).
    * A new webhook client config yielded by the webhook server.

    On either of these occasions, the manager rebuilds the webhook configuration
    and applies it to the specified configuration resources in the cluster
    (for which it needs some RBAC permissions).
    Besides, it also creates a webhook configuration resource if it is absent.
    """

    # Do nothing if not managed. The root task cannot be skipped at creation time,
    # since the managed mode only becomes known during the startup activities.
    if settings.admission.managed is None:
        await asyncio.Event().wait()
        return

    # Wait until the prerequisites for managing are available (scanned from the cluster).
    await insights.ready_resources.wait()
    resource = await insights.backbone.wait_for(selector)
    all_handlers = registry._webhooks.get_all_handlers()
    all_handlers = [h for h in all_handlers if h.reason == reason]

    # Optionally (if configured), pre-create the configuration objects if they are absent.
    # Use the try-or-fail strategy instead of check-and-do -- to reduce the RBAC requirements.
    try:
        await creating.create_obj(
            settings=settings,
            resource=resource,
            logger=logger,
            name=settings.admission.managed,
        )
    except errors.APIConflictError:
        pass  # exists already
    except errors.APIForbiddenError:
        logger.error(f"Not enough RBAC permissions to create a {resource}.")
        raise

    # Execute either when the client config actually changes (as yielded by the webhook server),
    # or when the condition is chain-notified (from the insights: on resources/namespaces revision).
    # Ignore inconsistencies: they are expected -- the server fills the defaults.
    client_config: Optional[reviews.WebhookClientConfig] = None
    try:
        async for client_config in container.as_changed():
            logger.info(f"Reconfiguring the {reason.value} webhook {settings.admission.managed}.")
            webhooks = build_webhooks(
                all_handlers,
                resources=insights.webhook_resources,
                name_suffix=settings.admission.managed,
                client_config=client_config)
            await patching.patch_obj(
                settings=settings,
                resource=resource,
                namespace=None,
                name=settings.admission.managed,
                patch=patches.Patch({'webhooks': webhooks}),
                logger=logger,
            )
    finally:
        # Attempt to remove all managed webhooks, except for the strict ones.
        if client_config is not None:
            logger.info(f"Cleaning up the admission webhook {settings.admission.managed}.")
            webhooks = build_webhooks(
                all_handlers,
                resources=insights.webhook_resources,
                name_suffix=settings.admission.managed,
                client_config=client_config,
                persistent_only=True)
            await patching.patch_obj(
                settings=settings,
                resource=resource,
                namespace=None,
                name=settings.admission.managed,
                patch=patches.Patch({'webhooks': webhooks}),
                logger=logger,
            )
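
The manager only acts when `settings.admission.managed` is set. A sketch of enabling the managed mode at startup (the configuration name and the choice of server are illustrative):

import kopf

@kopf.on.startup()
def configure(settings: kopf.OperatorSettings, **_):
    # With `managed` set, the manager above creates and continuously
    # patches the webhook configuration object under this name.
    settings.admission.managed = 'auto.kopf.example.com'
    settings.admission.server = kopf.WebhookAutoServer()  # for development
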
Code example #8
File: application.py Project: philipp-sontag-by/kopf
async def apply(
        *,
        settings: configuration.OperatorSettings,
        resource: references.Resource,
        body: bodies.Body,
        patch: patches.Patch,
        delays: Collection[float],
        logger: loggers.ObjectLogger,
        stream_pressure: Optional[asyncio.Event] = None,  # None for tests
) -> bool:
    delay = min(delays) if delays else None

    # Delete dummies on occasion, but don't trigger special patching for them [discussable].
    if patch:  # TODO: LATER: and the dummies are there (without additional methods?)
        settings.persistence.progress_storage.touch(body=body,
                                                    patch=patch,
                                                    value=None)

    # Actually patch if it was not empty originally or after the dummies removal.
    await patch_and_check(
        settings=settings,
        resource=resource,
        logger=logger,
        patch=patch,
        body=body,
    )

    # Sleep strictly after patching, never before -- to keep the status proper.
    # The patching above, if done, would interrupt the sleep instantly, so we skip the sleep entirely.
    # Note: a zero-second or negative sleep is still a sleep, and it will trigger a dummy patch.
    applied = False
    if delay and patch:
        logger.debug(
            f"Sleeping was skipped because of the patch, {delay} seconds left."
        )
    elif delay is not None:
        if delay > WAITING_KEEPALIVE_INTERVAL:
            limit = WAITING_KEEPALIVE_INTERVAL
            logger.debug(
                f"Sleeping for {delay} (capped {limit}) seconds for the delayed handlers."
            )
            unslept_delay = await aiotime.sleep(limit, wakeup=stream_pressure)
        elif delay > 0:
            logger.debug(
                f"Sleeping for {delay} seconds for the delayed handlers.")
            unslept_delay = await aiotime.sleep(delay, wakeup=stream_pressure)
        else:
            unslept_delay = None  # no need to sleep? means: slept in full.

        # Exclude cases when touching immediately after patching (including: ``delay == 0``).
        if patch and not delay:
            pass
        elif unslept_delay is not None:
            logger.debug(
                f"Sleeping was interrupted by new changes, {unslept_delay} seconds left."
            )
        else:
            # Any unique, always-changing value will work; not necessarily a timestamp.
            value = datetime.datetime.utcnow().isoformat()
            touch = patches.Patch()
            settings.persistence.progress_storage.touch(body=body,
                                                        patch=touch,
                                                        value=value)
            await patch_and_check(
                settings=settings,
                resource=resource,
                logger=logger,
                patch=touch,
                body=body,
            )
    elif not patch:  # no patch/touch and no delay
        applied = True
    return applied
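
As the final branches show, `apply()` returns True only when there was nothing left to patch and no delays were requested; the caller in code example #5 treats that as the end of a handling cycle ("Handling cycle is finished, waiting for new changes.").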