Code Example #1
File: test_id_generation.py Project: nnazeer/kopf
def test_good_aliases_over_good_addresses__symmetric(mocker, good1, good2):
    mocker.patch('socket.gethostname', return_value='localhost')
    mocker.patch('socket.gethostbyaddr',
                 side_effect=lambda fqdn: (fqdn, [good1], [good2]))
    own_id = detect_own_id(manual=True)
    assert own_id == f'some-user@{good1}'

    mocker.patch('socket.gethostname', return_value='localhost')
    mocker.patch('socket.gethostbyaddr',
                 side_effect=lambda fqdn: (fqdn, [good2], [good1]))
    own_id = detect_own_id(manual=True)
    assert own_id == f'some-user@{good2}'
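
A note on the mocking above: socket.gethostbyaddr() returns a (hostname, aliaslist, ipaddrlist) triple, which is why the side_effect fabricates a 3-tuple. The symmetric assertions show that detect_own_id() prefers a usable alias from that triple, whichever position it arrives in. A standard-library-only illustration of the return shape (no kopf involved):

import socket

# The same (hostname, aliaslist, ipaddrlist) triple that the test fabricates.
hostname, aliases, addresses = socket.gethostbyaddr('127.0.0.1')
print(hostname, aliases, addresses)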
Code Example #2
File: cli.py Project: aland-zhang/kopf
def freeze(
    id: Optional[str],
    message: Optional[str],
    lifetime: int,
    namespace: Optional[str],
    peering_name: str,
    priority: int,
) -> None:
    """ Freeze the resource handling in the cluster. """
    ourselves = peering.Peer(
        id=id or peering.detect_own_id(manual=True),
        name=peering_name,
        namespace=namespace,
        priority=priority,
        lifetime=lifetime,
    )
    registry = registries.SmartOperatorRegistry()
    settings = configuration.OperatorSettings()
    vault = credentials.Vault()
    auth.vault_var.set(vault)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(
        asyncio.wait({
            activities.authenticate(registry=registry,
                                    settings=settings,
                                    vault=vault),
            ourselves.keepalive(),
        }))
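
A portability note for this example and the similar freeze/resume variants below: passing bare coroutines to asyncio.wait() was deprecated in Python 3.8 and is rejected since Python 3.11, so on modern interpreters each coroutine must be wrapped in a task first. A self-contained sketch of the modern form, with sleeps standing in for the real coroutines:

import asyncio

async def main() -> None:
    # asyncio.wait() now requires tasks, not bare coroutines.
    authenticate = asyncio.create_task(asyncio.sleep(0.1))  # stands in for activities.authenticate(...)
    keepalive = asyncio.create_task(asyncio.sleep(0.2))     # stands in for ourselves.keepalive()
    await asyncio.wait({authenticate, keepalive})

asyncio.run(main())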
Code Example #3
File: test_id_generation.py Project: nnazeer/kopf
def test_from_a_pod_id(mocker, manual):
    mocker.patch('socket.gethostname', return_value='some-host')
    mocker.patch('socket.gethostbyaddr',
                 side_effect=lambda fqdn: (fqdn, [], []))
    mocker.patch.dict(os.environ, POD_ID='some-pod-1')
    own_id = detect_own_id(manual=manual)
    assert own_id == 'some-pod-1'
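
This test pins down the highest-priority source of the identity: when a POD_ID environment variable is present, detect_own_id() returns it verbatim in both manual and automatic modes (manual is parametrized here). A sketch of just that shortcut, inferred from the test rather than taken from kopf's code; the hostname-based fallback is reconstructed after Code Example #6:

import os

def detect_own_id_sketch(*, manual: bool = False) -> str:
    # Assumption drawn from the test: an explicit pod identity wins outright.
    pod_id = os.environ.get('POD_ID')
    if pod_id:
        return pod_id
    raise NotImplementedError  # hostname-based fallback, see the sketch after Code Example #6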
Code Example #4
def freeze(
    id: Optional[str],
    message: Optional[str],
    lifetime: int,
    namespaces: Collection[references.NamespacePattern],
    clusterwide: bool,
    peering_name: str,
    priority: int,
) -> None:
    """ Freeze the resource handling in the cluster. """
    identity = peering.Identity(id) if id else peering.detect_own_id(manual=True)
    insights = references.Insights()
    settings = configuration.OperatorSettings()
    settings.peering.name = peering_name
    settings.peering.priority = priority
    return running.run(clusterwide=clusterwide,
                       namespaces=namespaces,
                       insights=insights,
                       identity=identity,
                       settings=settings,
                       _command=peering.touch_command(insights=insights,
                                                      identity=identity,
                                                      settings=settings,
                                                      lifetime=lifetime))
Code Example #5
def resume(
    id: Optional[str],
    namespace: references.Namespace,
    peering_name: str,
) -> None:
    """ Resume the resource handling in the cluster. """
    identity = peering.Identity(id) if id else peering.detect_own_id(manual=True)
    registry = registries.SmartOperatorRegistry()
    settings = configuration.OperatorSettings()
    settings.peering.name = peering_name
    vault = credentials.Vault()
    auth.vault_var.set(vault)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(
        asyncio.wait({
            activities.authenticate(registry=registry,
                                    settings=settings,
                                    vault=vault),
            peering.touch(
                identity=identity,
                settings=settings,
                namespace=namespace,
                lifetime=0,
            ),
        }))
Code Example #6
File: test_id_generation.py Project: nnazeer/kopf
def test_suffixes_appended(mocker):
    mocker.patch('random.choices', return_value='random-str')
    mocker.patch('socket.gethostname', return_value='some-host')
    mocker.patch('socket.gethostbyaddr',
                 side_effect=lambda fqdn: (fqdn, [], []))
    with freezegun.freeze_time('2020-12-31T23:59:59.123456'):
        own_id = detect_own_id(manual=False)
    assert own_id == 'some-user@some-host/20201231235959/random-str'
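
Taken together, the ID-generation tests imply the overall shape of detect_own_id(): a user@host base, with timestamp and random suffixes appended only in automatic (manual=False) mode; the some-user value presumably comes from a getpass mock in a fixture not shown here. The following reconstruction is derived from the tests alone, not from kopf's actual code, and the suffix alphabet and length are assumptions:

import datetime
import getpass
import os
import random
import socket
import string

def detect_own_id_sketch(*, manual: bool = False) -> str:
    pod_id = os.environ.get('POD_ID')
    if pod_id:
        return pod_id  # Code Example #3: a pod identity wins outright.
    user = getpass.getuser()
    host = socket.gethostname()
    try:
        # Prefer a usable alias over the raw hostname (Code Examples #1 and #16).
        _, aliases, _ = socket.gethostbyaddr(host)
        host = aliases[0] if aliases else host
    except OSError:
        pass
    own_id = f'{user}@{host}'
    if not manual:
        # Code Example #6: automatic ids get timestamp/random suffixes for uniqueness.
        now = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
        suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6))
        own_id = f'{own_id}/{now}/{suffix}'
    return own_id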
Code Example #7
def create_tasks(
    loop: asyncio.AbstractEventLoop,
    lifecycle: Optional[Callable] = None,
    registry: Optional[registries.BaseRegistry] = None,
    standalone: bool = False,
    priority: int = 0,
    peering_name: str = peering.PEERING_DEFAULT_NAME,
    namespace: Optional[str] = None,
):
    """
    Create all the tasks needed to run the operator, but do not spawn/start them.
    The tasks are properly inter-connected depending on the runtime specification.
    They can be injected into any event loop as needed.
    """

    # The freezer and the registry are scoped to this whole task-set, to sync them all.
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    registry = registry if registry is not None else registries.get_default_registry()
    freeze = asyncio.Event()
    tasks = []

    # Monitor the peers, unless explicitly disabled.
    ourselves: Optional[peering.Peer] = peering.Peer.detect(
        id=peering.detect_own_id(),
        priority=priority,
        standalone=standalone,
        namespace=namespace,
        name=peering_name,
    )
    if ourselves:
        tasks.extend([
            loop.create_task(peering.peers_keepalive(ourselves=ourselves)),
            loop.create_task(
                watcher(namespace=namespace,
                        resource=ourselves.resource,
                        handler=functools.partial(
                            peering.peers_handler,
                            ourselves=ourselves,
                            freeze=freeze))),  # freeze is set/cleared
        ])

    # Resource event handling, only once for every known resource (de-duplicated).
    for resource in registry.resources:
        tasks.extend([
            loop.create_task(
                watcher(namespace=namespace,
                        resource=resource,
                        handler=functools.partial(
                            handling.custom_object_handler,
                            lifecycle=lifecycle,
                            registry=registry,
                            resource=resource,
                            freeze=freeze))),  # freeze is only checked
        ])

    return tasks
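
The docstring's claim that the tasks "can be injected into any event loop as needed" suggests a usage along these lines; this is a sketch only, since actually running it needs a configured Kubernetes client and registered handlers:

loop = asyncio.new_event_loop()
tasks = create_tasks(loop=loop)  # tasks are created but not yet running
try:
    loop.run_until_complete(asyncio.wait(tasks))
finally:
    loop.close()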
Code Example #8
def resume(id, namespace, peering_name):
    """ Resume the resource handling in the cluster. """
    cli_login()
    ourselves = peering.Peer(
        id=id or peering.detect_own_id(),
        name=peering_name,
        namespace=namespace,
    )
    loop = asyncio.get_event_loop()
    loop.run_until_complete(ourselves.disappear())
Code Example #9
def freeze(id, message, lifetime, namespace, peering_name, priority):
    """ Freeze the resource handling in the cluster. """
    cli_login()
    ourselves = peering.Peer(
        id=id or peering.detect_own_id(),
        name=peering_name,
        namespace=namespace,
        priority=priority,
        lifetime=lifetime,
    )
    loop = asyncio.get_event_loop()
    loop.run_until_complete(ourselves.keepalive())
Code Example #10
File: cli.py Project: zalando-incubator/kopf
def resume(
    id: Optional[str],
    namespace: Optional[str],
    peering_name: str,
) -> None:
    """ Resume the resource handling in the cluster. """
    ourselves = peering.Peer(
        id=id or peering.detect_own_id(),
        name=peering_name,
        namespace=namespace,
    )
    loop = asyncio.get_event_loop()
    loop.run_until_complete(ourselves.disappear())
Code Example #11
File: cli.py Project: zalando-incubator/kopf
def freeze(
    id: Optional[str],
    message: Optional[str],
    lifetime: int,
    namespace: Optional[str],
    peering_name: str,
    priority: int,
) -> None:
    """ Freeze the resource handling in the cluster. """
    ourselves = peering.Peer(
        id=id or peering.detect_own_id(),
        name=peering_name,
        namespace=namespace,
        priority=priority,
        lifetime=lifetime,
    )
    loop = asyncio.get_event_loop()
    loop.run_until_complete(ourselves.keepalive())
Code Example #12
File: cli.py Project: aland-zhang/kopf
def resume(
    id: Optional[str],
    namespace: Optional[str],
    peering_name: str,
) -> None:
    """ Resume the resource handling in the cluster. """
    ourselves = peering.Peer(
        id=id or peering.detect_own_id(manual=True),
        name=peering_name,
        namespace=namespace,
    )
    registry = registries.SmartOperatorRegistry()
    settings = configuration.OperatorSettings()
    vault = credentials.Vault()
    auth.vault_var.set(vault)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(
        asyncio.wait({
            activities.authenticate(registry=registry,
                                    settings=settings,
                                    vault=vault),
            ourselves.disappear(),
        }))
Code Example #13
async def spawn_tasks(
        *,
        lifecycle: Optional[lifecycles.LifeCycleFn] = None,
        indexers: Optional[indexing.OperatorIndexers] = None,
        registry: Optional[registries.OperatorRegistry] = None,
        settings: Optional[configuration.OperatorSettings] = None,
        memories: Optional[containers.ResourceMemories] = None,
        insights: Optional[references.Insights] = None,
        identity: Optional[peering.Identity] = None,
        standalone: Optional[bool] = None,
        priority: Optional[int] = None,
        peering_name: Optional[str] = None,
        liveness_endpoint: Optional[str] = None,
        clusterwide: bool = False,
        namespaces: Collection[references.NamespacePattern] = (),
        namespace: Optional[references.NamespacePattern] = None,  # deprecated
        stop_flag: Optional[primitives.Flag] = None,
        ready_flag: Optional[primitives.Flag] = None,
        vault: Optional[credentials.Vault] = None,
        memo: Optional[ephemera.AnyMemo] = None,
        _command: Optional[Coroutine[None, None, None]] = None,
) -> Collection[aiotasks.Task]:
    """
    Spawn all the tasks needed to run the operator.

    The tasks are properly inter-connected with the synchronisation primitives.
    """
    loop = asyncio.get_running_loop()

    if namespaces and namespace:
        raise TypeError("Either namespaces= or namespace= can be passed. Got both.")
    elif namespace:
        warnings.warn("namespace= is deprecated; use namespaces=[...]", DeprecationWarning)
        namespaces = [namespace]

    if clusterwide and namespaces:
        raise TypeError("The operator can be either cluster-wide or namespaced, not both.")
    if not clusterwide and not namespaces:
        warnings.warn("Absence of either namespaces or cluster-wide flag will become an error soon."
                      " For now, switching to the cluster-wide mode for backward compatibility.",
                      FutureWarning)
        clusterwide = True

    # All tasks of the operator are synced via these primitives and structures:
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    registry = registry if registry is not None else registries.get_default_registry()
    settings = settings if settings is not None else configuration.OperatorSettings()
    memories = memories if memories is not None else containers.ResourceMemories()
    indexers = indexers if indexers is not None else indexing.OperatorIndexers()
    insights = insights if insights is not None else references.Insights()
    identity = identity if identity is not None else peering.detect_own_id(manual=False)
    vault = vault if vault is not None else credentials.Vault()
    memo = memo if memo is not None else ephemera.Memo()
    event_queue: posting.K8sEventQueue = asyncio.Queue()
    signal_flag: aiotasks.Future = asyncio.Future()
    started_flag: asyncio.Event = asyncio.Event()
    operator_paused = primitives.ToggleSet(any)
    tasks: MutableSequence[aiotasks.Task] = []

    # Map kwargs into the settings object.
    settings.peering.clusterwide = clusterwide
    if peering_name is not None:
        settings.peering.mandatory = True
        settings.peering.name = peering_name
    if standalone is not None:
        settings.peering.standalone = standalone
    if priority is not None:
        settings.peering.priority = priority

    # Prepopulate indexers with empty indices -- to be available to the startup handlers.
    indexers.ensure(registry._resource_indexing.get_all_handlers())

    # Global credentials store for this operator, also for CRD-reading & peering mode detection.
    auth.vault_var.set(vault)

    # Special case: pass the settings container through the user-side handlers (no explicit args).
    # Toolkits have to keep the original operator context somehow, and the only way is contextvars.
    posting.settings_var.set(settings)

    # Few common background forever-running infrastructural tasks (irregular root tasks).
    tasks.append(aiotasks.create_task(
        name="stop-flag checker",
        coro=_stop_flag_checker(
            signal_flag=signal_flag,
            stop_flag=stop_flag)))
    tasks.append(aiotasks.create_task(
        name="ultimate termination",
        coro=_ultimate_termination(
            settings=settings,
            stop_flag=stop_flag)))
    tasks.append(aiotasks.create_task(
        name="startup/cleanup activities",
        coro=_startup_cleanup_activities(
            root_tasks=tasks,  # used as a "live" view, populated later.
            ready_flag=ready_flag,
            started_flag=started_flag,
            registry=registry,
            settings=settings,
            indices=indexers.indices,
            vault=vault,
            memo=memo)))  # to purge & finalize the caches in the end.

    # Kill all the daemons gracefully when the operator exits (so that they are not "hung").
    tasks.append(aiotasks.create_guarded_task(
        name="daemon killer", flag=started_flag, logger=logger,
        coro=daemons.daemon_killer(
            settings=settings,
            memories=memories,
            operator_paused=operator_paused)))

    # Keeping the credentials fresh and valid via the authentication handlers on demand.
    tasks.append(aiotasks.create_guarded_task(
        name="credentials retriever", flag=started_flag, logger=logger,
        coro=activities.authenticator(
            registry=registry,
            settings=settings,
            indices=indexers.indices,
            vault=vault,
            memo=memo)))

    # K8s-event posting. Events are queued in-memory and posted in the background.
    # NB: currently, it is a global task, but can be made per-resource or per-object.
    tasks.append(aiotasks.create_guarded_task(
        name="poster of events", flag=started_flag, logger=logger,
        coro=posting.poster(
            backbone=insights.backbone,
            event_queue=event_queue)))

    # Liveness probing -- so that Kubernetes would know that the operator is alive.
    if liveness_endpoint:
        tasks.append(aiotasks.create_guarded_task(
            name="health reporter", flag=started_flag, logger=logger,
            coro=probing.health_reporter(
                registry=registry,
                settings=settings,
                endpoint=liveness_endpoint,
                indices=indexers.indices,
                memo=memo)))

    # Permanent observation of what resource kinds and namespaces are available in the cluster.
    # Spawn and cancel dimensional tasks as they come and go; dimensions = resources x namespaces.
    tasks.append(aiotasks.create_guarded_task(
        name="resource observer", flag=started_flag, logger=logger,
        coro=observation.resource_observer(
            insights=insights,
            registry=registry,
            settings=settings)))
    tasks.append(aiotasks.create_guarded_task(
        name="namespace observer", flag=started_flag, logger=logger,
        coro=observation.namespace_observer(
            clusterwide=clusterwide,
            namespaces=namespaces,
            insights=insights,
            settings=settings)))

    # Explicit command is a hack for the CLI to run coroutines in an operator-like environment.
    # If not specified, then use the normal resource processing. It is not exposed publicly (yet).
    if _command is not None:
        tasks.append(aiotasks.create_guarded_task(
            name="the command", flag=started_flag, logger=logger, finishable=True,
            coro=_command))
    else:
        tasks.append(aiotasks.create_guarded_task(
            name="multidimensional multitasker", flag=started_flag, logger=logger,
            coro=orchestration.ochestrator(
                settings=settings,
                insights=insights,
                identity=identity,
                operator_paused=operator_paused,
                processor=functools.partial(processing.process_resource_event,
                                            lifecycle=lifecycle,
                                            registry=registry,
                                            settings=settings,
                                            indexers=indexers,
                                            memories=memories,
                                            memobase=memo,
                                            event_queue=event_queue))))

    # Ensure that all guarded tasks got control for a moment to enter the guard.
    await asyncio.sleep(0)

    # On Ctrl+C or pod termination, cancel all tasks gracefully.
    if threading.current_thread() is threading.main_thread():
        # Handle NotImplementedError when run on Windows, since asyncio only supports Unix signals.
        try:
            loop.add_signal_handler(signal.SIGINT, signal_flag.set_result, signal.SIGINT)
            loop.add_signal_handler(signal.SIGTERM, signal_flag.set_result, signal.SIGTERM)
        except NotImplementedError:
            logger.warning("OS signals are ignored: can't add signal handler in Windows.")

    else:
        logger.warning("OS signals are ignored: running not in the main thread.")

    return tasks
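
The signal handling at the end of this example resolves a future (signal_flag) instead of raising KeyboardInterrupt, so the stop-flag checker task can treat an OS signal like any other stop condition. A self-contained, Unix-only demonstration of that pattern, with a self-sent SIGTERM simulating a pod termination (all names are local to the demo):

import asyncio
import os
import signal

async def main() -> None:
    loop = asyncio.get_running_loop()
    signal_flag: asyncio.Future = loop.create_future()
    # The first signal resolves the future instead of raising an exception.
    loop.add_signal_handler(signal.SIGTERM, signal_flag.set_result, signal.SIGTERM)
    loop.call_later(0.1, os.kill, os.getpid(), signal.SIGTERM)  # simulate termination
    received = await signal_flag
    print(f'received: {received!r}')

asyncio.run(main())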
Code Example #14
File: test_id_generation.py Project: nnazeer/kopf
def test_suffixes_ignored(mocker):
    mocker.patch('socket.gethostname', return_value='some-host')
    mocker.patch('socket.gethostbyaddr',
                 side_effect=lambda fqdn: (fqdn, [], []))
    own_id = detect_own_id(manual=True)
    assert own_id == 'some-user@some-host'
Code Example #15
File: running.py Project: corka149/kopf
async def spawn_tasks(
        *,
        lifecycle: Optional[lifecycles.LifeCycleFn] = None,
        registry: Optional[registries.OperatorRegistry] = None,
        memories: Optional[containers.ResourceMemories] = None,
        standalone: bool = False,
        priority: int = 0,
        peering_name: Optional[str] = None,
        liveness_endpoint: Optional[str] = None,
        namespace: Optional[str] = None,
        stop_flag: Optional[primitives.Flag] = None,
        ready_flag: Optional[primitives.Flag] = None,
        vault: Optional[credentials.Vault] = None,
) -> Tasks:
    """
    Spawn all the tasks needed to run the operator.

    The tasks are properly inter-connected with the synchronisation primitives.
    """
    loop = asyncio.get_running_loop()

    # The freezer and the registry are scoped to this whole task-set, to sync them all.
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    registry = registry if registry is not None else registries.get_default_registry()
    memories = memories if memories is not None else containers.ResourceMemories()
    vault = vault if vault is not None else global_vault
    vault = vault if vault is not None else credentials.Vault()
    event_queue: posting.K8sEventQueue = asyncio.Queue()
    freeze_mode: primitives.Toggle = primitives.Toggle()
    signal_flag: asyncio_Future = asyncio.Future()
    ready_flag = ready_flag if ready_flag is not None else asyncio.Event()
    tasks: MutableSequence[asyncio_Task] = []

    # Global credentials store for this operator, also for CRD-reading & peering mode detection.
    auth.vault_var.set(vault)

    # Few common background forever-running infrastructural tasks (irregular root tasks).
    tasks.extend([
        loop.create_task(_stop_flag_checker(
            signal_flag=signal_flag,
            stop_flag=stop_flag,
        )),
        loop.create_task(_startup_cleanup_activities(
            root_tasks=tasks,  # used as a "live" view, populated later.
            ready_flag=ready_flag,
            registry=registry,
            vault=vault,  # to purge & finalize the caches in the end.
        )),
    ])

    # Keeping the credentials fresh and valid via the authentication handlers on demand.
    tasks.extend([
        loop.create_task(_root_task_checker(
            name="credentials retriever", ready_flag=ready_flag,
            coro=activities.authenticator(
                registry=registry,
                vault=vault))),
    ])

    # K8s-event posting. Events are queued in-memory and posted in the background.
    # NB: currently, it is a global task, but can be made per-resource or per-object.
    tasks.extend([
        loop.create_task(_root_task_checker(
            name="poster of events", ready_flag=ready_flag,
            coro=posting.poster(
                event_queue=event_queue))),
    ])

    # Liveness probing -- so that Kubernetes would know that the operator is alive.
    if liveness_endpoint:
        tasks.extend([
            loop.create_task(_root_task_checker(
                name="health reporter", ready_flag=ready_flag,
                coro=probing.health_reporter(
                    registry=registry,
                    endpoint=liveness_endpoint))),
        ])

    # Monitor the peers, unless explicitly disabled.
    ourselves: Optional[peering.Peer] = await peering.Peer.detect(
        id=peering.detect_own_id(), priority=priority,
        standalone=standalone, namespace=namespace, name=peering_name,
    )
    if ourselves:
        tasks.extend([
            loop.create_task(peering.peers_keepalive(
                ourselves=ourselves)),
            loop.create_task(_root_task_checker(
                name="watcher of peering", ready_flag=ready_flag,
                coro=queueing.watcher(
                    namespace=namespace,
                    resource=ourselves.resource,
                    processor=functools.partial(peering.process_peering_event,
                                                ourselves=ourselves,
                                                freeze_mode=freeze_mode)))),
        ])

    # Resource event handling, only once for every known resource (de-duplicated).
    for resource in registry.resources:
        tasks.extend([
            loop.create_task(_root_task_checker(
                name=f"watcher of {resource.name}", ready_flag=ready_flag,
                coro=queueing.watcher(
                    namespace=namespace,
                    resource=resource,
                    freeze_mode=freeze_mode,
                    processor=functools.partial(processing.process_resource_event,
                                                lifecycle=lifecycle,
                                                registry=registry,
                                                memories=memories,
                                                resource=resource,
                                                event_queue=event_queue)))),
        ])

    # On Ctrl+C or pod termination, cancel all tasks gracefully.
    if threading.current_thread() is threading.main_thread():
        # Handle NotImplementedError when run on Windows, since asyncio only supports Unix signals.
        try:
            loop.add_signal_handler(signal.SIGINT, signal_flag.set_result, signal.SIGINT)
            loop.add_signal_handler(signal.SIGTERM, signal_flag.set_result, signal.SIGTERM)
        except NotImplementedError:
            logger.warning("OS signals are ignored: can't add signal handler in Windows.")

    else:
        logger.warning("OS signals are ignored: running not in the main thread.")

    return tasks
Code Example #16
File: test_id_generation.py Project: nnazeer/kopf
def test_good_aliases_over_bad_hostnames(mocker, good, bad):
    mocker.patch('socket.gethostname', return_value=bad)
    mocker.patch('socket.gethostbyaddr',
                 side_effect=lambda fqdn: (fqdn, [good], []))
    own_id = detect_own_id(manual=True)
    assert own_id == f'some-user@{good}'
Code Example #17
File: test_id_generation.py Project: turbaszek/kopf
def test_with_defaults():
    own_id = detect_own_id()
    assert own_id == 'some-user@some-host/2020-12-31T23:59:59.123456/random-str'
Code Example #18
File: test_id_generation.py Project: turbaszek/kopf
def test_from_a_pod_id(mocker):
    mocker.patch.dict(os.environ, POD_ID='some-pod-1')
    own_id = detect_own_id()
    assert own_id == 'some-pod-1'
Code Example #19
async def spawn_tasks(
    *,
    lifecycle: Optional[lifecycles.LifeCycleFn] = None,
    registry: Optional[registries.OperatorRegistry] = None,
    settings: Optional[configuration.OperatorSettings] = None,
    memories: Optional[containers.ResourceMemories] = None,
    standalone: Optional[bool] = None,
    priority: Optional[int] = None,
    peering_name: Optional[str] = None,
    liveness_endpoint: Optional[str] = None,
    namespace: Optional[str] = None,
    stop_flag: Optional[primitives.Flag] = None,
    ready_flag: Optional[primitives.Flag] = None,
    vault: Optional[credentials.Vault] = None,
) -> Collection[aiotasks.Task]:
    """
    Spawn all the tasks needed to run the operator.

    The tasks are properly inter-connected with the synchronisation primitives.
    """
    loop = asyncio.get_running_loop()

    # The freezer and the registry are scoped to this whole task-set, to sync them all.
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    registry = registry if registry is not None else registries.get_default_registry()
    settings = settings if settings is not None else configuration.OperatorSettings()
    memories = memories if memories is not None else containers.ResourceMemories()
    vault = vault if vault is not None else global_vault
    vault = vault if vault is not None else credentials.Vault()
    event_queue: posting.K8sEventQueue = asyncio.Queue()
    freeze_name = f"{peering_name!r}@{namespace}" if namespace else f"cluster-wide {peering_name!r}"
    freeze_checker = primitives.ToggleSet()
    freeze_toggle = await freeze_checker.make_toggle(name=freeze_name)
    signal_flag: aiotasks.Future = asyncio.Future()
    started_flag: asyncio.Event = asyncio.Event()
    tasks: MutableSequence[aiotasks.Task] = []

    # Map kwargs into the settings object.
    if peering_name is not None:
        settings.peering.mandatory = True
        settings.peering.name = peering_name
    if standalone is not None:
        settings.peering.standalone = standalone
    if priority is not None:
        settings.peering.priority = priority

    # Global credentials store for this operator, also for CRD-reading & peering mode detection.
    auth.vault_var.set(vault)

    # Special case: pass the settings container through the user-side handlers (no explicit args).
    # Toolkits have to keep the original operator context somehow, and the only way is contextvars.
    posting.settings_var.set(settings)

    # Few common background forever-running infrastructural tasks (irregular root tasks).
    tasks.append(
        aiotasks.create_task(name="stop-flag checker",
                             coro=_stop_flag_checker(signal_flag=signal_flag,
                                                     stop_flag=stop_flag)))
    tasks.append(
        aiotasks.create_task(name="ultimate termination",
                             coro=_ultimate_termination(settings=settings,
                                                        stop_flag=stop_flag)))
    tasks.append(
        aiotasks.create_task(
            name="startup/cleanup activities",
            coro=_startup_cleanup_activities(
                root_tasks=tasks,  # used as a "live" view, populated later.
                ready_flag=ready_flag,
                started_flag=started_flag,
                registry=registry,
                settings=settings,
                vault=vault)))  # to purge & finalize the caches in the end.

    # Kill all the daemons gracefully when the operator exits (so that they are not "hung").
    tasks.append(
        aiotasks.create_guarded_task(
            name="daemon killer",
            flag=started_flag,
            logger=logger,
            coro=daemons.daemon_killer(settings=settings, memories=memories)))

    # Keeping the credentials fresh and valid via the authentication handlers on demand.
    tasks.append(
        aiotasks.create_guarded_task(name="credentials retriever",
                                     flag=started_flag,
                                     logger=logger,
                                     coro=activities.authenticator(
                                         registry=registry,
                                         settings=settings,
                                         vault=vault)))

    # K8s-event posting. Events are queued in-memory and posted in the background.
    # NB: currently, it is a global task, but can be made per-resource or per-object.
    tasks.append(
        aiotasks.create_guarded_task(
            name="poster of events",
            flag=started_flag,
            logger=logger,
            coro=posting.poster(event_queue=event_queue)))

    # Liveness probing -- so that Kubernetes would know that the operator is alive.
    if liveness_endpoint:
        tasks.append(
            aiotasks.create_guarded_task(name="health reporter",
                                         flag=started_flag,
                                         logger=logger,
                                         coro=probing.health_reporter(
                                             registry=registry,
                                             settings=settings,
                                             endpoint=liveness_endpoint)))

    # Monitor the peers, unless explicitly disabled.
    if await peering.detect_presence(namespace=namespace, settings=settings):
        identity = peering.detect_own_id(manual=False)
        tasks.append(
            aiotasks.create_guarded_task(name="peering keepalive",
                                         flag=started_flag,
                                         logger=logger,
                                         coro=peering.keepalive(
                                             namespace=namespace,
                                             settings=settings,
                                             identity=identity)))
        tasks.append(
            aiotasks.create_guarded_task(
                name="watcher of peering",
                flag=started_flag,
                logger=logger,
                coro=queueing.watcher(
                    namespace=namespace,
                    settings=settings,
                    resource=peering.guess_resource(namespace=namespace),
                    processor=functools.partial(peering.process_peering_event,
                                                namespace=namespace,
                                                settings=settings,
                                                identity=identity,
                                                freeze_toggle=freeze_toggle))))

    # Resource event handling, only once for every known resource (de-duplicated).
    for resource in registry.resources:
        tasks.append(
            aiotasks.create_guarded_task(
                name=f"watcher of {resource.name}",
                flag=started_flag,
                logger=logger,
                coro=queueing.watcher(namespace=namespace,
                                      settings=settings,
                                      resource=resource,
                                      freeze_checker=freeze_checker,
                                      processor=functools.partial(
                                          processing.process_resource_event,
                                          lifecycle=lifecycle,
                                          registry=registry,
                                          settings=settings,
                                          memories=memories,
                                          resource=resource,
                                          event_queue=event_queue))))

    # On Ctrl+C or pod termination, cancel all tasks gracefully.
    if threading.current_thread() is threading.main_thread():
        # Handle NotImplementedError when run on Windows, since asyncio only supports Unix signals.
        try:
            loop.add_signal_handler(signal.SIGINT, signal_flag.set_result,
                                    signal.SIGINT)
            loop.add_signal_handler(signal.SIGTERM, signal_flag.set_result,
                                    signal.SIGTERM)
        except NotImplementedError:
            logger.warning(
                "OS signals are ignored: can't add signal handler in Windows.")

    else:
        logger.warning(
            "OS signals are ignored: running not in the main thread.")

    return tasks
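
Compared to the single freeze Event of Code Example #20, this generation splits the machinery in two: freeze_toggle is the per-peering switch that the peering processor flips, while freeze_checker aggregates all such toggles for the watchers. A minimal, kopf-independent sketch of that aggregation idea (all names are local to the sketch, not kopf's API):

class ToggleSetSketch:
    # Aggregate many named on/off toggles; watchers only ask "is any on?".
    def __init__(self) -> None:
        self._toggles: dict = {}

    def make_toggle(self, name: str) -> str:
        self._toggles[name] = False
        return name

    def set_toggle(self, name: str, on: bool) -> None:
        self._toggles[name] = on

    def is_on(self) -> bool:
        return any(self._toggles.values())

toggles = ToggleSetSketch()
peering_toggle = toggles.make_toggle('cluster-wide peering')
toggles.set_toggle(peering_toggle, True)  # a higher-priority peer appeared
assert toggles.is_on()                    # watchers pause their processing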
Code Example #20
File: running.py Project: adewin/kopf
async def spawn_tasks(
    lifecycle: Optional[Callable] = None,
    registry: Optional[registries.GlobalRegistry] = None,
    standalone: bool = False,
    priority: int = 0,
    peering_name: Optional[str] = None,
    namespace: Optional[str] = None,
) -> Collection[asyncio.Task]:
    """
    Spawn all the tasks needed to run the operator.

    The tasks are properly inter-connected with the synchronisation primitives.
    """
    loop = asyncio.get_running_loop()

    # The freezer and the registry are scoped to this whole task-set, to sync them all.
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    registry = registry if registry is not None else registries.get_default_registry()
    event_queue = asyncio.Queue(loop=loop)
    freeze_flag = asyncio.Event(loop=loop)
    should_stop = asyncio.Event(loop=loop)
    tasks = []

    # A top-level task for external stopping by setting a stop-flag. Once set,
    # this task will exit, and thus all other top-level tasks will be cancelled.
    tasks.extend([
        loop.create_task(_stop_flag_checker(should_stop)),
    ])

    # K8s-event posting. Events are queued in-memory and posted in the background.
    # NB: currently, it is a global task, but can be made per-resource or per-object.
    tasks.extend([
        loop.create_task(posting.poster(event_queue=event_queue)),
    ])

    # Monitor the peers, unless explicitly disabled.
    ourselves: Optional[peering.Peer] = peering.Peer.detect(
        id=peering.detect_own_id(),
        priority=priority,
        standalone=standalone,
        namespace=namespace,
        name=peering_name,
    )
    if ourselves:
        tasks.extend([
            loop.create_task(peering.peers_keepalive(ourselves=ourselves)),
            loop.create_task(
                queueing.watcher(
                    namespace=namespace,
                    resource=ourselves.resource,
                    handler=functools.partial(
                        peering.peers_handler,
                        ourselves=ourselves,
                        freeze=freeze_flag))),  # freeze is set/cleared
        ])

    # Resource event handling, only once for every known resource (de-duplicated).
    for resource in registry.resources:
        tasks.extend([
            loop.create_task(
                queueing.watcher(
                    namespace=namespace,
                    resource=resource,
                    handler=functools.partial(
                        handling.custom_object_handler,
                        lifecycle=lifecycle,
                        registry=registry,
                        resource=resource,
                        event_queue=event_queue,
                        freeze=freeze_flag))),  # freeze is only checked
        ])

    # On Ctrl+C or pod termination, cancel all tasks gracefully.
    if threading.current_thread() is threading.main_thread():
        loop.add_signal_handler(signal.SIGINT, should_stop.set)
        loop.add_signal_handler(signal.SIGTERM, should_stop.set)
    else:
        logger.warning(
            "OS signals are ignored: running not in the main thread.")

    return tasks
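
This older variant still passes loop= into asyncio.Queue and asyncio.Event. That parameter was deprecated in Python 3.8 and removed in Python 3.10, which is presumably why the newer examples above construct these primitives without it:

import asyncio

# Modern equivalents: the primitives bind to the running loop on first use.
event_queue: asyncio.Queue = asyncio.Queue()
freeze_flag = asyncio.Event()
should_stop = asyncio.Event()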
Code Example #21
File: test_id_generation.py Project: nnazeer/kopf
def test_useless_suffixes_removed(mocker, fqdn):
    mocker.patch('socket.gethostname', return_value=fqdn)
    mocker.patch('socket.gethostbyaddr',
                 side_effect=lambda fqdn: (fqdn, [], []))
    own_id = detect_own_id(manual=True)
    assert own_id == 'some-user@my-host'
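
The fqdn parametrization (not shown) presumably feeds variants such as 'my-host.local' or 'my-host.localdomain', and the assertion shows they all collapse to 'my-host'. A hedged sketch of such a cleanup step; the exact suffix list is an assumption, not kopf's actual rule:

def clean_hostname(fqdn: str) -> str:
    # Strip DNS suffixes that carry no identity information (assumed list).
    for suffix in ('.local', '.localdomain'):
        if fqdn.endswith(suffix):
            fqdn = fqdn[:-len(suffix)]
    return fqdn

assert clean_hostname('my-host.localdomain') == 'my-host'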