Example No. 1
 def decorator(  # lgtm[py/similar-function]
         fn: callbacks.DaemonFn,
 ) -> callbacks.DaemonFn:
     _warn_conflicting_values(field, value)
     _verify_filters(labels, annotations)
     real_registry = registry if registry is not None else registries.get_default_registry()
     real_field = dicts.parse_field(field) or None  # to not store tuple() as a no-field case.
     real_id = registries.generate_id(fn=fn, id=id, suffix=".".join(real_field or []))
     selector = references.Selector(
         __group_or_groupversion_or_name, __version_or_name, __name,
         group=group, version=version,
         kind=kind, plural=plural, singular=singular, shortcut=shortcut, category=category,
     )
     handler = handlers.DaemonHandler(
         fn=fn, id=real_id, param=param,
         errors=errors, timeout=timeout, retries=retries, backoff=backoff,
         selector=selector, labels=labels, annotations=annotations, when=when,
         field=real_field, value=value,
         initial_delay=initial_delay, requires_finalizer=True,
         cancellation_backoff=cancellation_backoff,
         cancellation_timeout=cancellation_timeout,
         cancellation_polling=cancellation_polling,
     )
     real_registry._spawning.append(handler)
     return fn
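
This body is the registration path behind Kopf's public daemon decorator (note requires_finalizer=True and the cancellation knobs). A minimal usage sketch, assuming the resource name 'kopfexamples' as in Kopf's examples:

import kopf

@kopf.daemon('kopfexamples')  # illustrative resource; the finalizer keeps the object until the daemon exits
def monitor_kex(stopped, name, logger, **kwargs):
    # Daemons run per object and should exit promptly once `stopped` is set;
    # otherwise the cancellation_timeout/backoff passed to the handler above take effect.
    while not stopped:
        logger.info(f"{name} is still being monitored.")
        stopped.wait(10)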
Example No. 2
 def decorator(  # lgtm[py/similar-function]
         fn: callbacks.ActivityFn,
 ) -> callbacks.ActivityFn:
     real_registry = registry if registry is not None else registries.get_default_registry()
     real_id = registries.generate_id(fn=fn, id=id)
     handler = handlers.ActivityHandler(
         fn=fn, id=real_id, param=param,
         errors=errors, timeout=timeout, retries=retries, backoff=backoff,
         activity=causes.Activity.AUTHENTICATION,
     )
     real_registry._activities.append(handler)
     return fn
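
With activity=causes.Activity.AUTHENTICATION hard-coded, this is the registration behind kopf.on.login. A sketch of a custom login handler; the endpoint and credentials are placeholders:

import kopf

@kopf.on.login()
def login_fn(**kwargs):
    # Return fresh credentials; Kopf re-invokes this handler when they expire or fail.
    return kopf.ConnectionInfo(
        server='https://localhost:6443',  # placeholder API server
        insecure=True,                    # placeholder; supply CA/token data in real setups
    )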
Example No. 3
 def decorator(  # lgtm[py/similar-function]
         fn: callbacks.ChangingFn,
 ) -> callbacks.ChangingFn:
     _warn_conflicting_values(field, value)
     _verify_filters(labels, annotations)
     real_registry = registry if registry is not None else registries.get_default_registry()
     real_field = dicts.parse_field(field) or None  # to not store tuple() as a no-field case.
     real_id = registries.generate_id(fn=fn, id=id, suffix=".".join(real_field or []))
     selector = references.Selector(
         __group_or_groupversion_or_name,
         __version_or_name,
         __name,
         group=group,
         version=version,
         kind=kind,
         plural=plural,
         singular=singular,
         shortcut=shortcut,
         category=category,
     )
     handler = handlers.ChangingHandler(
         fn=fn,
         id=real_id,
         param=param,
         errors=errors,
         timeout=timeout,
         retries=retries,
         backoff=backoff,
         selector=selector,
         labels=labels,
         annotations=annotations,
         when=when,
         field=real_field,
         value=value,
         old=None,
         new=None,
         field_needs_change=False,
         initial=None,
         deleted=None,
         requires_finalizer=bool(not optional),
         reason=causes.Reason.DELETE,
     )
     real_registry._changing.append(handler)
     return fn
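
The reason=causes.Reason.DELETE and requires_finalizer=bool(not optional) pair corresponds to kopf.on.delete. A usage sketch; the resource name is illustrative:

import kopf

@kopf.on.delete('kopfexamples')  # optional=True would register without a blocking finalizer
def delete_fn(name, spec, logger, **kwargs):
    # Runs while the finalizer still blocks the actual deletion of the object.
    logger.info(f"Cleaning up external resources for {name}.")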
Example No. 4
 def decorator(  # lgtm[py/similar-function]
         fn: callbacks.WebhookFn,
 ) -> callbacks.WebhookFn:
     _warn_conflicting_values(field, value)
     _verify_filters(labels, annotations)
     real_registry = registry if registry is not None else registries.get_default_registry()
     real_field = dicts.parse_field(field) or None  # to not store tuple() as a no-field case.
     real_id = registries.generate_id(fn=fn, id=id, suffix=".".join(real_field or []))
     selector = references.Selector(
         __group_or_groupversion_or_name,
         __version_or_name,
         __name,
         group=group,
         version=version,
         kind=kind,
         plural=plural,
         singular=singular,
         shortcut=shortcut,
         category=category,
     )
     handler = handlers.WebhookHandler(
         fn=fn,
         id=real_id,
         param=param,
         errors=None,
         timeout=None,
         retries=None,
         backoff=None,  # TODO: add some meaning later
         selector=selector,
         labels=labels,
         annotations=annotations,
         when=when,
         field=real_field,
         value=value,
         reason=causes.WebhookType.MUTATING,
         operation=operation,
         persistent=persistent,
         side_effects=side_effects,
         ignore_failures=ignore_failures,
     )
     real_registry._webhooks.append(handler)
     return fn
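
reason=causes.WebhookType.MUTATING marks this as the registration behind kopf.on.mutate. A sketch of a mutating admission handler, assuming the resource name and patched field are illustrative:

import kopf

@kopf.on.mutate('kopfexamples', operation='CREATE')
def ensure_default_field(spec, patch, **kwargs):
    # Mutating handlers change the object via `patch`; Kopf turns it into a JSONPatch
    # in the admission review response.
    if spec.get('field') is None:
        patch.spec['field'] = 'default-value'  # illustrative default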
Example No. 5
 def decorator(  # lgtm[py/similar-function]
         fn: callbacks.IndexingFn,
 ) -> callbacks.IndexingFn:
     _warn_conflicting_values(field, value)
     _verify_filters(labels, annotations)
     real_registry = registry if registry is not None else registries.get_default_registry()
     real_field = dicts.parse_field(field) or None  # to not store tuple() as a no-field case.
     real_id = registries.generate_id(fn=fn, id=id)
     selector = references.Selector(
         __group_or_groupversion_or_name, __version_or_name, __name,
         group=group, version=version,
         kind=kind, plural=plural, singular=singular, shortcut=shortcut, category=category,
     )
     handler = handlers.IndexingHandler(
         fn=fn, id=real_id, param=param,
         errors=errors, timeout=timeout, retries=retries, backoff=backoff,
         selector=selector, labels=labels, annotations=annotations, when=when,
         field=real_field, value=value,
     )
     real_registry._indexing.append(handler)
     return fn
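
This is the registration path of kopf.index (note that the generated id carries no field suffix). A sketch of an index that other handlers can then receive as a same-named kwarg; the grouping key is illustrative:

import kopf

@kopf.index('pods')
def pods_by_phase(namespace, name, status, **kwargs):
    # Each object contributes one entry; the index maps phase -> entries of (namespace, name).
    return {status.get('phase', 'Unknown'): (namespace, name)}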
Example No. 6
async def spawn_tasks(
        *,
        lifecycle: Optional[execution.LifeCycleFn] = None,
        indexers: Optional[indexing.OperatorIndexers] = None,
        registry: Optional[registries.OperatorRegistry] = None,
        settings: Optional[configuration.OperatorSettings] = None,
        memories: Optional[inventory.ResourceMemories] = None,
        insights: Optional[references.Insights] = None,
        identity: Optional[peering.Identity] = None,
        standalone: Optional[bool] = None,
        priority: Optional[int] = None,
        peering_name: Optional[str] = None,
        liveness_endpoint: Optional[str] = None,
        clusterwide: bool = False,
        namespaces: Collection[references.NamespacePattern] = (),
        namespace: Optional[references.NamespacePattern] = None,  # deprecated
        stop_flag: Optional[aioadapters.Flag] = None,
        ready_flag: Optional[aioadapters.Flag] = None,
        vault: Optional[credentials.Vault] = None,
        memo: Optional[object] = None,
        _command: Optional[Coroutine[None, None, None]] = None,
) -> Collection[aiotasks.Task]:
    """
    Spawn all the tasks needed to run the operator.

    The tasks are properly inter-connected with the synchronisation primitives.
    """
    loop = asyncio.get_running_loop()

    if namespaces and namespace:
        raise TypeError("Either namespaces= or namespace= can be passed. Got both.")
    elif namespace:
        warnings.warn("namespace= is deprecated; use namespaces=[...]", DeprecationWarning)
        namespaces = [namespace]

    if clusterwide and namespaces:
        raise TypeError("The operator can be either cluster-wide or namespaced, not both.")
    if not clusterwide and not namespaces:
        warnings.warn("Absence of either namespaces or cluster-wide flag will become an error soon."
                      " For now, switching to the cluster-wide mode for backward compatibility.",
                      FutureWarning)
        clusterwide = True

    # All tasks of the operator are synced via these primitives and structures:
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    registry = registry if registry is not None else registries.get_default_registry()
    settings = settings if settings is not None else configuration.OperatorSettings()
    memories = memories if memories is not None else inventory.ResourceMemories()
    indexers = indexers if indexers is not None else indexing.OperatorIndexers()
    insights = insights if insights is not None else references.Insights()
    identity = identity if identity is not None else peering.detect_own_id(manual=False)
    vault = vault if vault is not None else credentials.Vault()
    memo = memo if memo is not None else ephemera.Memo()
    memo = ephemera.AnyMemo(memo)  # type-casted
    event_queue: posting.K8sEventQueue = asyncio.Queue()
    signal_flag: aiotasks.Future = asyncio.Future()
    started_flag: asyncio.Event = asyncio.Event()
    operator_paused = aiotoggles.ToggleSet(any)
    tasks: MutableSequence[aiotasks.Task] = []

    # Map kwargs into the settings object.
    settings.peering.clusterwide = clusterwide
    if peering_name is not None:
        settings.peering.mandatory = True
        settings.peering.name = peering_name
    if standalone is not None:
        settings.peering.standalone = standalone
    if priority is not None:
        settings.peering.priority = priority

    # Prepopulate indexers with empty indices -- to be available to the startup handlers.
    indexers.ensure(registry._indexing.get_all_handlers())

    # Global credentials store for this operator, also for CRD-reading & peering mode detection.
    auth.vault_var.set(vault)

    # Special case: pass the settings container through the user-side handlers (no explicit args).
    # Toolkits have to keep the original operator context somehow, and the only way is contextvars.
    posting.settings_var.set(settings)

    # Few common background forever-running infrastructural tasks (irregular root tasks).
    tasks.append(aiotasks.create_task(
        name="stop-flag checker",
        coro=_stop_flag_checker(
            signal_flag=signal_flag,
            stop_flag=stop_flag)))
    tasks.append(aiotasks.create_task(
        name="ultimate termination",
        coro=_ultimate_termination(
            settings=settings,
            stop_flag=stop_flag)))
    tasks.append(aiotasks.create_task(
        name="startup/cleanup activities",
        coro=_startup_cleanup_activities(
            root_tasks=tasks,  # used as a "live" view, populated later.
            ready_flag=ready_flag,
            started_flag=started_flag,
            registry=registry,
            settings=settings,
            indices=indexers.indices,
            vault=vault,
            memo=memo)))  # to purge & finalize the caches in the end.

    # Kill all the daemons gracefully when the operator exits (so that they are not "hung").
    tasks.append(aiotasks.create_guarded_task(
        name="daemon killer", flag=started_flag, logger=logger,
        coro=daemons.daemon_killer(
            settings=settings,
            memories=memories,
            operator_paused=operator_paused)))

    # Keeping the credentials fresh and valid via the authentication handlers on demand.
    tasks.append(aiotasks.create_guarded_task(
        name="credentials retriever", flag=started_flag, logger=logger,
        coro=activities.authenticator(
            registry=registry,
            settings=settings,
            indices=indexers.indices,
            vault=vault,
            memo=memo)))

    # K8s-event posting. Events are queued in-memory and posted in the background.
    # NB: currently, it is a global task, but can be made per-resource or per-object.
    tasks.append(aiotasks.create_guarded_task(
        name="poster of events", flag=started_flag, logger=logger,
        coro=posting.poster(
            backbone=insights.backbone,
            event_queue=event_queue)))

    # Liveness probing -- so that Kubernetes would know that the operator is alive.
    if liveness_endpoint:
        tasks.append(aiotasks.create_guarded_task(
            name="health reporter", flag=started_flag, logger=logger,
            coro=probing.health_reporter(
                registry=registry,
                settings=settings,
                endpoint=liveness_endpoint,
                indices=indexers.indices,
                memo=memo)))

    # Admission webhooks run as either a server or a tunnel or a fixed config.
    # The webhook manager automatically adjusts the cluster configuration at runtime.
    container: aiovalues.Container[reviews.WebhookClientConfig] = aiovalues.Container()
    tasks.append(aiotasks.create_guarded_task(
        name="admission insights chain", flag=started_flag, logger=logger,
        coro=aiobindings.condition_chain(
            source=insights.revised, target=container.changed)))
    tasks.append(aiotasks.create_guarded_task(
        name="admission validating configuration manager", flag=started_flag, logger=logger,
        coro=admission.validating_configuration_manager(
            container=container, settings=settings, registry=registry, insights=insights)))
    tasks.append(aiotasks.create_guarded_task(
        name="admission mutating configuration manager", flag=started_flag, logger=logger,
        coro=admission.mutating_configuration_manager(
            container=container, settings=settings, registry=registry, insights=insights)))
    tasks.append(aiotasks.create_guarded_task(
        name="admission webhook server", flag=started_flag, logger=logger,
        coro=admission.admission_webhook_server(
            container=container, settings=settings, registry=registry, insights=insights,
            webhookfn=functools.partial(admission.serve_admission_request,
                                        settings=settings, registry=registry, insights=insights,
                                        memories=memories, memobase=memo,
                                        indices=indexers.indices))))

    # Permanent observation of what resource kinds and namespaces are available in the cluster.
    # Spawn and cancel dimensional tasks as they come and go; dimensions = resources x namespaces.
    tasks.append(aiotasks.create_guarded_task(
        name="resource observer", flag=started_flag, logger=logger,
        coro=observation.resource_observer(
            insights=insights,
            registry=registry,
            settings=settings)))
    tasks.append(aiotasks.create_guarded_task(
        name="namespace observer", flag=started_flag, logger=logger,
        coro=observation.namespace_observer(
            clusterwide=clusterwide,
            namespaces=namespaces,
            insights=insights,
            settings=settings)))

    # Explicit command is a hack for the CLI to run coroutines in an operator-like environment.
    # If not specified, then use the normal resource processing. It is not exposed publicly (yet).
    if _command is not None:
        tasks.append(aiotasks.create_guarded_task(
            name="the command", flag=started_flag, logger=logger, finishable=True,
            coro=_command))
    else:
        tasks.append(aiotasks.create_guarded_task(
            name="multidimensional multitasker", flag=started_flag, logger=logger,
            coro=orchestration.ochestrator(
                settings=settings,
                insights=insights,
                identity=identity,
                operator_paused=operator_paused,
                processor=functools.partial(processing.process_resource_event,
                                            lifecycle=lifecycle,
                                            registry=registry,
                                            settings=settings,
                                            indexers=indexers,
                                            memories=memories,
                                            memobase=memo,
                                            event_queue=event_queue))))

    # Ensure that all guarded tasks got control for a moment to enter the guard.
    await asyncio.sleep(0)

    # On Ctrl+C or pod termination, cancel all tasks gracefully.
    if threading.current_thread() is threading.main_thread():
        # Handle NotImplementedError when run on Windows, since asyncio only supports Unix signals.
        try:
            loop.add_signal_handler(signal.SIGINT, signal_flag.set_result, signal.SIGINT)
            loop.add_signal_handler(signal.SIGTERM, signal_flag.set_result, signal.SIGTERM)
        except NotImplementedError:
            logger.warning("OS signals are ignored: can't add signal handler in Windows.")

    else:
        logger.warning("OS signals are ignored: running not in the main thread.")

    return tasks
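
spawn_tasks() is usually not called directly; kopf.operator() wraps it together with the task runner and accepts the same keyword arguments. A minimal embedding sketch, assuming a cluster-wide operator:

import asyncio
import kopf

async def main():
    # kopf.operator() spawns the root tasks listed above and waits for them to finish.
    await kopf.operator(clusterwide=True)

if __name__ == '__main__':
    asyncio.run(main())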