Example no. 1
async def test_stop_itself_is_cancelled(assert_logs, caplog, cancelled):
    logger = logging.getLogger()
    caplog.set_level(0)
    task1 = create_task(simple())
    task2 = create_task(stuck())
    stask = create_task(stop([task1, task2], title='sample', logger=logger, interval=0.01, cancelled=cancelled))

    async with async_timeout.timeout(1):
        done, pending = await asyncio.wait({stask}, timeout=0.011)
    assert not done
    assert task1.done()
    assert not task2.done()

    stask.cancel()

    async with async_timeout.timeout(1):
        done, pending = await asyncio.wait({stask}, timeout=0.011)
    assert done
    assert task1.done()
    assert not task2.done()

    assert_logs([
        r"Sample tasks are not stopped: (finishing|cancelling) normally; tasks left: \{<Task",
        r"Sample tasks are not stopped: (cancelling|double-cancelling) at stopping; tasks left: \{<Task",
    ], prohibited=[
        r"Sample tasks are stopped",
    ])

    task2.cancel()
    async with async_timeout.timeout(1):
        done, pending = await asyncio.wait({task1, task2})
    assert done
    assert task1.done()
    assert task2.done()
Example no. 2
async def test_stop_iteratively(assert_logs, caplog, cancelled):
    logger = logging.getLogger()
    caplog.set_level(0)
    task1 = create_task(simple())
    task2 = create_task(stuck())
    stask = create_task(
        stop([task1, task2],
             title='sample',
             logger=logger,
             interval=0.01,
             cancelled=cancelled))

    async with async_timeout.timeout(1):  # extra test safety
        done, pending = await asyncio.wait({stask}, timeout=0.011)
    assert not done
    assert task1.done()
    assert not task2.done()

    task2.cancel()

    async with async_timeout.timeout(1):  # extra test safety
        done, pending = await asyncio.wait({stask}, timeout=0.011)
    assert done
    assert task1.done()
    assert task2.done()

    assert_logs([
        r"Sample tasks are not stopped: (finishing|cancelling) normally; tasks left: \{<Task",
        r"Sample tasks are stopped: (finishing|cancelling) normally; tasks left: set\(\)",
    ])
Example no. 3
async def test_stop_immediately_with_cancelling(assert_logs, caplog):
    logger = logging.getLogger()
    caplog.set_level(0)
    task1 = create_task(simple())
    task2 = create_task(simple())
    async with async_timeout.timeout(1):
        done, pending = await stop([task1, task2], title='sample', logger=logger, cancelled=True)
    assert done
    assert not pending
    assert_logs(["Sample tasks are stopped: cancelling normally"])
    assert task1.cancelled()
    assert task2.cancelled()
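Taken together, the three tests above pin down the observable contract of the stop() helper: it returns a (done, pending) pair, cancels the tasks up front when cancelled=True, re-checks them every interval seconds, and logs whether the tasks "are stopped" or "are not stopped" on every check. Below is a minimal sketch with that behaviour; it is only an illustration inferred from the tests (the name stop_sketch and the simplified log wording are assumptions), not the project's actual implementation, which additionally handles the case where the stopper task itself is cancelled.

import asyncio
import logging
from typing import Collection, Set, Tuple

async def stop_sketch(
        tasks: Collection[asyncio.Task],
        *,
        title: str,
        logger: logging.Logger,
        interval: float = 1.0,
        cancelled: bool = False,
) -> Tuple[Set[asyncio.Task], Set[asyncio.Task]]:
    """Illustrative only: optionally cancel the tasks, then await them with progress logs."""
    if cancelled:
        for task in tasks:
            task.cancel()
    done: Set[asyncio.Task] = set()
    pending: Set[asyncio.Task] = set(tasks)
    while pending:
        finished, pending = await asyncio.wait(pending, timeout=interval)
        done |= finished
        if pending:
            logger.debug(f"{title.capitalize()} tasks are not stopped; tasks left: {pending}")
    logger.debug(f"{title.capitalize()} tasks are stopped; tasks left: set()")
    return done, pending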
Example no. 4
async def test_alltasks_exclusion():
    flag = asyncio.Event()
    task1 = create_task(flag.wait())
    task2 = create_task(flag.wait())
    done, pending = await asyncio.wait([task1, task2], timeout=0.01)
    assert not done

    tasks = await all_tasks(ignored=[task2])
    assert task1 in tasks
    assert task2 not in tasks
    assert asyncio.current_task() not in tasks

    flag.set()
    await task1
    await task2
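The test above implies a thin wrapper over asyncio.all_tasks() that excludes an explicit ignore-list and the calling task itself. A plausible sketch (the name all_tasks_sketch is an assumption; this is not necessarily the project's actual code):

import asyncio
from typing import Collection, Set

async def all_tasks_sketch(*, ignored: Collection[asyncio.Task] = ()) -> Set[asyncio.Task]:
    """Illustrative only: all running tasks except the ignored ones and the current task."""
    current_task = asyncio.current_task()
    return {task for task in asyncio.all_tasks()
            if task is not current_task and task not in ignored}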
Example no. 5
async def _stop_flag_checker(
        signal_flag: aiotasks.Future,
        stop_flag: Optional[primitives.Flag],
) -> None:
    """
    A top-level task for external stopping by setting a stop-flag. Once set,
    this task will exit, and thus all other top-level tasks will be cancelled.
    """

    # Selects the flags to be awaited (if set).
    flags = []
    if signal_flag is not None:
        flags.append(signal_flag)
    if stop_flag is not None:
        flags.append(aiotasks.create_task(primitives.wait_flag(stop_flag),
                                          name="stop-flag waiter"))

    # Wait until one of the stoppers is set/raised.
    try:
        done, pending = await asyncio.wait(flags, return_when=asyncio.FIRST_COMPLETED)
        future = done.pop()
        result = await future
    except asyncio.CancelledError:
        pass  # operator is stopping for any other reason
    else:
        if result is None:
            logger.info("Stop-flag is raised. Operator is stopping.")
        elif isinstance(result, signal.Signals):
            logger.info("Signal %s is received. Operator is stopping.", result.name)
        else:
            logger.info("Stop-flag is set to %r. Operator is stopping.", result)
Example no. 6
async def test_py37_create_task_accepts_name(mocker):
    real_create_task = mocker.patch('asyncio.create_task')
    coro = sample()
    task = create_task(coro, name='unused')
    assert real_create_task.called
    assert task is real_create_task.return_value
    await coro  # to prevent "never awaited" errors
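The test implies that the local create_task() wrapper accepts a name= keyword on every Python version, even though asyncio.create_task() itself only gained it in Python 3.8. A compatibility shim in that spirit could look roughly like this (a sketch, not necessarily the project's exact code):

import asyncio
import sys
from typing import Any, Coroutine, Optional

def create_task_sketch(coro: Coroutine[Any, Any, Any], *, name: Optional[str] = None) -> asyncio.Task:
    """Illustrative only: pass the name through on 3.8+, silently drop it on 3.7."""
    if sys.version_info >= (3, 8):
        return asyncio.create_task(coro, name=name)
    return asyncio.create_task(coro)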
Example no. 7
async def test_wait_with_timeout():
    flag = asyncio.Event()
    task = create_task(flag.wait())
    done, pending = await wait([task], timeout=0.01)
    assert not done
    assert pending == {task}
    flag.set()
    await task
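In this test, the wait() wrapper behaves exactly like asyncio.wait(). One plausible reason for wrapping it, stated here only as an assumption, is to tolerate an empty task collection, which plain asyncio.wait() rejects with a ValueError; a sketch:

import asyncio
from typing import Collection, Optional, Set, Tuple

async def wait_sketch(
        tasks: Collection[asyncio.Task],
        *,
        timeout: Optional[float] = None,
) -> Tuple[Set[asyncio.Task], Set[asyncio.Task]]:
    """Illustrative only: like asyncio.wait(), but tolerant of empty collections."""
    if not tasks:
        return set(), set()
    return await asyncio.wait(tasks, timeout=timeout)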
Example no. 8
async def test_reraise_skips_cancellations():
    task = create_task(asyncio.Event().wait())
    done, pending = await asyncio.wait([task], timeout=0.01)  # let it start
    assert not done
    task.cancel()
    done, pending = await asyncio.wait([task], timeout=0.01)  # let it react
    assert done
    await reraise([task])
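Together with the error-escalation test at the end of this section, this pins down the contract of reraise(): cancelled tasks are skipped silently, while a task that failed with an exception has that exception re-raised. A behaviour sketch consistent with both tests (illustrative only, not the project's actual implementation):

import asyncio
from typing import Collection

async def reraise_sketch(tasks: Collection[asyncio.Task]) -> None:
    """Illustrative only: re-raise the first non-cancellation failure among the done tasks."""
    for task in tasks:
        if not task.done() or task.cancelled():
            continue
        exc = task.exception()  # safe: the task is done and not cancelled
        if exc is not None:
            raise exc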
Example no. 9
async def spawn_resource_daemons(
    *,
    settings: configuration.OperatorSettings,
    handlers: Sequence[handlers_.ResourceSpawningHandler],
    daemons: MutableMapping[handlers_.HandlerId, containers.Daemon],
    cause: causation.ResourceSpawningCause,
    memory: containers.ResourceMemory,
) -> Collection[float]:
    """
    Ensure that all daemons are spawned for this individual resource.

    This function can be called multiple times on multiple handling cycles
    (though usually should be called on the first-seen occasion), so it must
    be idempotent: not having duplicating side-effects on multiple calls.
    """
    if memory.live_fresh_body is None:  # for type-checking; "not None" is ensured in processing.
        raise RuntimeError(
            "A daemon is spawned with None as body. This is a bug. Please report."
        )
    for handler in handlers:
        if handler.id not in daemons:
            stopper = primitives.DaemonStopper()
            daemon_cause = causation.DaemonCause(
                resource=cause.resource,
                indices=cause.indices,
                logger=cause.logger,
                body=memory.live_fresh_body,
                memo=memory.memo,
                patch=patches.Patch(),  # not the same as the one-shot spawning patch!
                stopper=stopper,  # for checking (passed to kwargs)
            )
            daemon = containers.Daemon(
                stopper=stopper,  # for stopping (outside of causes)
                handler=handler,
                logger=loggers.LocalObjectLogger(body=cause.body,
                                                 settings=settings),
                task=aiotasks.create_task(
                    _runner(
                        settings=settings,
                        daemons=daemons,  # for self-garbage-collection
                        handler=handler,
                        cause=daemon_cause,
                        memory=memory,
                    ),
                    name=f'runner of {handler.id}'
                ),  # sometimes, daemons; sometimes, timers.
            )
            daemons[handler.id] = daemon
    return []
Example no. 10
async def streaming_watch(
    *,
    settings: configuration.OperatorSettings,
    resource: resources.Resource,
    namespace: Optional[str],
    freeze_mode: Optional[primitives.Toggle] = None,
) -> AsyncIterator[bodies.RawEvent]:

    # Prevent both watching and listing while the freeze mode is on, until it is off.
    # Specifically, the watch-stream closes its connection once the freeze mode is on,
    # so the while-true & for-event-in-stream cycles exit, and this coroutine is started
    # again by the `infinite_stream()` (the watcher timeout is swallowed by the freeze time).
    if freeze_mode is not None and freeze_mode.is_on():
        logger.debug("Freezing the watch-stream for %r", resource)
        await freeze_mode.wait_for_off()
        logger.debug("Resuming the watch-stream for %r", resource)

    # A stop-feature is a client-specific way of terminating the streaming HTTPS connection
    # when a freeze-mode is turned on. The low-level API call attaches its `response.close()`
    # to the future's callbacks, and a background task triggers it when the mode is turned on.
    freeze_waiter: aiotasks.Future
    if freeze_mode is not None:
        freeze_waiter = aiotasks.create_task(
            freeze_mode.wait_for_on(),
            name=f'freeze-waiter for {resource.name} @ {namespace or "cluster-wide"}',
        )
    else:
        freeze_waiter = asyncio.Future()  # a dummy just to have it

    try:
        stream = continuous_watch(
            settings=settings,
            resource=resource,
            namespace=namespace,
            freeze_waiter=freeze_waiter,
        )
        async for raw_event in stream:
            yield raw_event
    finally:
        with contextlib.suppress(asyncio.CancelledError):
            freeze_waiter.cancel()
            await freeze_waiter
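The comment above describes the stop-future mechanism only in prose: the low-level streaming call registers its response.close() as a "done" callback on freeze_waiter, so completing the waiter tears down the long-lived HTTP connection. A self-contained toy demonstration of that wiring (the _DummyResponse class stands in for whatever client response object is actually used; it is an assumption, not the real client code):

import asyncio

class _DummyResponse:
    """Stands in for an HTTP response object with a synchronous .close()."""
    def close(self) -> None:
        print("stream closed")

async def _demo() -> None:
    response = _DummyResponse()
    freeze_waiter: asyncio.Future = asyncio.get_running_loop().create_future()
    # The streaming call registers the closing of its stream on the waiter:
    freeze_waiter.add_done_callback(lambda _future: response.close())
    # When the freeze mode turns on, the waiter completes and the callback fires:
    freeze_waiter.set_result(None)
    await asyncio.sleep(0)  # give the loop a chance to run the done-callbacks

asyncio.run(_demo())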
Example no. 11
async def daemon_killer(
    *,
    settings: configuration.OperatorSettings,
    memories: containers.ResourceMemories,
) -> None:
    """
    An operator's root task to kill the daemons on the operator's shutdown.
    """

    # Sleep forever, or until cancelled, which happens when the operator begins its shutdown.
    try:
        await asyncio.Event().wait()

    # Terminate all running daemons when the operator exits (and this task is cancelled).
    finally:
        tasks = [
            aiotasks.create_task(name=f"stop daemon {daemon.handler.id}",
                                 coro=stop_daemon(daemon=daemon,
                                                  settings=settings))
            for memory in memories.iter_all_memories()
            for daemon in memory.running_daemons.values()
        ]
        await aiotasks.wait(tasks)
Example no. 12
async def spawn_tasks(
        *,
        lifecycle: Optional[lifecycles.LifeCycleFn] = None,
        indexers: Optional[indexing.OperatorIndexers] = None,
        registry: Optional[registries.OperatorRegistry] = None,
        settings: Optional[configuration.OperatorSettings] = None,
        memories: Optional[containers.ResourceMemories] = None,
        insights: Optional[references.Insights] = None,
        identity: Optional[peering.Identity] = None,
        standalone: Optional[bool] = None,
        priority: Optional[int] = None,
        peering_name: Optional[str] = None,
        liveness_endpoint: Optional[str] = None,
        clusterwide: bool = False,
        namespaces: Collection[references.NamespacePattern] = (),
        namespace: Optional[references.NamespacePattern] = None,  # deprecated
        stop_flag: Optional[primitives.Flag] = None,
        ready_flag: Optional[primitives.Flag] = None,
        vault: Optional[credentials.Vault] = None,
        memo: Optional[ephemera.AnyMemo] = None,
        _command: Optional[Coroutine[None, None, None]] = None,
) -> Collection[aiotasks.Task]:
    """
    Spawn all the tasks needed to run the operator.

    The tasks are properly inter-connected with the synchronisation primitives.
    """
    loop = asyncio.get_running_loop()

    if namespaces and namespace:
        raise TypeError("Either namespaces= or namespace= can be passed. Got both.")
    elif namespace:
        warnings.warn("namespace= is deprecated; use namespaces=[...]", DeprecationWarning)
        namespaces = [namespace]

    if clusterwide and namespaces:
        raise TypeError("The operator can be either cluster-wide or namespaced, not both.")
    if not clusterwide and not namespaces:
        warnings.warn("Absence of either namespaces or cluster-wide flag will become an error soon."
                      " For now, switching to the cluster-wide mode for backward compatibility.",
                      FutureWarning)
        clusterwide = True

    # All tasks of the operator are synced via these primitives and structures:
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    registry = registry if registry is not None else registries.get_default_registry()
    settings = settings if settings is not None else configuration.OperatorSettings()
    memories = memories if memories is not None else containers.ResourceMemories()
    indexers = indexers if indexers is not None else indexing.OperatorIndexers()
    insights = insights if insights is not None else references.Insights()
    identity = identity if identity is not None else peering.detect_own_id(manual=False)
    vault = vault if vault is not None else credentials.Vault()
    memo = memo if memo is not None else ephemera.Memo()
    event_queue: posting.K8sEventQueue = asyncio.Queue()
    signal_flag: aiotasks.Future = asyncio.Future()
    started_flag: asyncio.Event = asyncio.Event()
    operator_paused = primitives.ToggleSet(any)
    tasks: MutableSequence[aiotasks.Task] = []

    # Map kwargs into the settings object.
    settings.peering.clusterwide = clusterwide
    if peering_name is not None:
        settings.peering.mandatory = True
        settings.peering.name = peering_name
    if standalone is not None:
        settings.peering.standalone = standalone
    if priority is not None:
        settings.peering.priority = priority

    # Prepopulate indexers with empty indices -- to be available to the startup handlers.
    indexers.ensure(registry._resource_indexing.get_all_handlers())

    # Global credentials store for this operator, also for CRD-reading & peering mode detection.
    auth.vault_var.set(vault)

    # Special case: pass the settings container through the user-side handlers (no explicit args).
    # Toolkits have to keep the original operator context somehow, and the only way is contextvars.
    posting.settings_var.set(settings)

    # Few common background forever-running infrastructural tasks (irregular root tasks).
    tasks.append(aiotasks.create_task(
        name="stop-flag checker",
        coro=_stop_flag_checker(
            signal_flag=signal_flag,
            stop_flag=stop_flag)))
    tasks.append(aiotasks.create_task(
        name="ultimate termination",
        coro=_ultimate_termination(
            settings=settings,
            stop_flag=stop_flag)))
    tasks.append(aiotasks.create_task(
        name="startup/cleanup activities",
        coro=_startup_cleanup_activities(
            root_tasks=tasks,  # used as a "live" view, populated later.
            ready_flag=ready_flag,
            started_flag=started_flag,
            registry=registry,
            settings=settings,
            indices=indexers.indices,
            vault=vault,
            memo=memo)))  # to purge & finalize the caches in the end.

    # Kill all the daemons gracefully when the operator exits (so that they are not "hung").
    tasks.append(aiotasks.create_guarded_task(
        name="daemon killer", flag=started_flag, logger=logger,
        coro=daemons.daemon_killer(
            settings=settings,
            memories=memories,
            operator_paused=operator_paused)))

    # Keeping the credentials fresh and valid via the authentication handlers on demand.
    tasks.append(aiotasks.create_guarded_task(
        name="credentials retriever", flag=started_flag, logger=logger,
        coro=activities.authenticator(
            registry=registry,
            settings=settings,
            indices=indexers.indices,
            vault=vault,
            memo=memo)))

    # K8s-event posting. Events are queued in-memory and posted in the background.
    # NB: currently, it is a global task, but can be made per-resource or per-object.
    tasks.append(aiotasks.create_guarded_task(
        name="poster of events", flag=started_flag, logger=logger,
        coro=posting.poster(
            backbone=insights.backbone,
            event_queue=event_queue)))

    # Liveness probing -- so that Kubernetes would know that the operator is alive.
    if liveness_endpoint:
        tasks.append(aiotasks.create_guarded_task(
            name="health reporter", flag=started_flag, logger=logger,
            coro=probing.health_reporter(
                registry=registry,
                settings=settings,
                endpoint=liveness_endpoint,
                indices=indexers.indices,
                memo=memo)))

    # Permanent observation of what resource kinds and namespaces are available in the cluster.
    # Spawn and cancel dimensional tasks as they come and go; dimensions = resources x namespaces.
    tasks.append(aiotasks.create_guarded_task(
        name="resource observer", flag=started_flag, logger=logger,
        coro=observation.resource_observer(
            insights=insights,
            registry=registry,
            settings=settings)))
    tasks.append(aiotasks.create_guarded_task(
        name="namespace observer", flag=started_flag, logger=logger,
        coro=observation.namespace_observer(
            clusterwide=clusterwide,
            namespaces=namespaces,
            insights=insights,
            settings=settings)))

    # Explicit command is a hack for the CLI to run coroutines in an operator-like environment.
    # If not specified, then use the normal resource processing. It is not exposed publicly (yet).
    if _command is not None:
        tasks.append(aiotasks.create_guarded_task(
            name="the command", flag=started_flag, logger=logger, finishable=True,
            coro=_command))
    else:
        tasks.append(aiotasks.create_guarded_task(
            name="multidimensional multitasker", flag=started_flag, logger=logger,
            coro=orchestration.ochestrator(
                settings=settings,
                insights=insights,
                identity=identity,
                operator_paused=operator_paused,
                processor=functools.partial(processing.process_resource_event,
                                            lifecycle=lifecycle,
                                            registry=registry,
                                            settings=settings,
                                            indexers=indexers,
                                            memories=memories,
                                            memobase=memo,
                                            event_queue=event_queue))))

    # Ensure that all guarded tasks got control for a moment to enter the guard.
    await asyncio.sleep(0)

    # On Ctrl+C or pod termination, cancel all tasks gracefully.
    if threading.current_thread() is threading.main_thread():
        # Handle NotImplementedError when run on Windows, since asyncio only supports Unix signals
        try:
            loop.add_signal_handler(signal.SIGINT, signal_flag.set_result, signal.SIGINT)
            loop.add_signal_handler(signal.SIGTERM, signal_flag.set_result, signal.SIGTERM)
        except NotImplementedError:
            logger.warning("OS signals are ignored: can't add signal handler in Windows.")

    else:
        logger.warning("OS signals are ignored: running not in the main thread.")

    return tasks
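spawn_tasks() only creates the root tasks; something still has to drive them to completion and tear everything down once the first of them exits or fails. The surrounding framework presumably has its own runner for that, but the general driving pattern can be sketched generically (this is an illustration, not the project's actual runner):

import asyncio
from typing import Collection

async def run_tasks_sketch(tasks: Collection[asyncio.Task]) -> None:
    """Illustrative only: run root tasks until the first one exits, then stop the rest."""
    if not tasks:
        return
    try:
        # The first task to finish (normally or with an error) ends the whole operator.
        done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
    finally:
        # Cancel whatever is still running and wait for the cancellations to settle.
        remaining = [task for task in tasks if not task.done()]
        for task in remaining:
            task.cancel()
        if remaining:
            await asyncio.wait(remaining)
    # Escalate the first failure, if any, so that the caller sees it.
    for task in done:
        if not task.cancelled() and task.exception() is not None:
            raise task.exception()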
Example no. 13
async def streaming_block(
    *,
    resource: resources.Resource,
    namespace: Optional[str],
    freeze_checker: Optional[primitives.ToggleSet],
) -> AsyncIterator[aiotasks.Future]:
    """
    Block the execution until the freeze is off; signal when it is on again.

    This prevents both watching and listing while the freeze mode is on,
    until it is off. Specifically, the watch-stream closes its connection
    once the freeze mode is on, so the while-true & for-event-in-stream cycles
    exit, and the streaming coroutine is started again by `infinite_stream()`
    (the watcher timeout is swallowed by the freeze time).

    Returns a future (or a task) that is set when the freeze is turned on again.

    A stop-future is a client-specific way of terminating the streaming HTTPS
    connections when the freeze is turned back on. The low-level streaming API
    call attaches its `response.close()` to the future's "done" callback,
    so that the stream is closed once the freeze is turned back on.

    Note: this routine belongs to watching and does not belong to peering.
    The freeze can be managed in any other way: as an imaginary edge case,
    imagine an operator with a UI with a "pause" button that freezes the operator.
    """
    where = f'in {namespace!r}' if namespace is not None else 'cluster-wide'

    # Block until unfrozen before even starting the API communication.
    if freeze_checker is not None and freeze_checker.is_on():
        names = {
            toggle.name
            for toggle in freeze_checker if toggle.is_on() and toggle.name
        }
        freezing_reason = f" (blockers: {', '.join(names)})" if names else ""
        logger.debug(
            f"Freezing the watch-stream for {resource} {where}{freezing_reason}."
        )

        await freeze_checker.wait_for(False)

        names = {
            toggle.name
            for toggle in freeze_checker if toggle.is_on() and toggle.name
        }
        resuming_reason = f" (resolved: {', '.join(names)})" if names else ""
        logger.debug(
            f"Resuming the watch-stream for {resource} {where}{resuming_reason}."
        )

    # Create the signalling future that the freeze is on again.
    freeze_waiter: aiotasks.Future
    if freeze_checker is not None:
        freeze_waiter = aiotasks.create_task(
            freeze_checker.wait_for(True),
            name=f"freeze-waiter for {resource}")
    else:
        freeze_waiter = asyncio.Future()  # a dummy just to have it

    # Go for the streaming with the prepared freezing/unfreezing setup.
    try:
        yield freeze_waiter
    finally:
        with contextlib.suppress(asyncio.CancelledError):
            freeze_waiter.cancel()
            await freeze_waiter
Example no. 14
async def spawn_tasks(
    *,
    lifecycle: Optional[lifecycles.LifeCycleFn] = None,
    registry: Optional[registries.OperatorRegistry] = None,
    settings: Optional[configuration.OperatorSettings] = None,
    memories: Optional[containers.ResourceMemories] = None,
    standalone: Optional[bool] = None,
    priority: Optional[int] = None,
    peering_name: Optional[str] = None,
    liveness_endpoint: Optional[str] = None,
    namespace: Optional[str] = None,
    stop_flag: Optional[primitives.Flag] = None,
    ready_flag: Optional[primitives.Flag] = None,
    vault: Optional[credentials.Vault] = None,
) -> Collection[aiotasks.Task]:
    """
    Spawn all the tasks needed to run the operator.

    The tasks are properly inter-connected with the synchronisation primitives.
    """
    loop = asyncio.get_running_loop()

    # The freezer and the registry are scoped to this whole task-set, to sync them all.
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    registry = registry if registry is not None else registries.get_default_registry()
    settings = settings if settings is not None else configuration.OperatorSettings()
    memories = memories if memories is not None else containers.ResourceMemories()
    vault = vault if vault is not None else global_vault
    vault = vault if vault is not None else credentials.Vault()
    event_queue: posting.K8sEventQueue = asyncio.Queue()
    freeze_name = f"{peering_name!r}@{namespace}" if namespace else f"cluster-wide {peering_name!r}"
    freeze_checker = primitives.ToggleSet()
    freeze_toggle = await freeze_checker.make_toggle(name=freeze_name)
    signal_flag: aiotasks.Future = asyncio.Future()
    started_flag: asyncio.Event = asyncio.Event()
    tasks: MutableSequence[aiotasks.Task] = []

    # Map kwargs into the settings object.
    if peering_name is not None:
        settings.peering.mandatory = True
        settings.peering.name = peering_name
    if standalone is not None:
        settings.peering.standalone = standalone
    if priority is not None:
        settings.peering.priority = priority

    # Global credentials store for this operator, also for CRD-reading & peering mode detection.
    auth.vault_var.set(vault)

    # Special case: pass the settings container through the user-side handlers (no explicit args).
    # Toolkits have to keep the original operator context somehow, and the only way is contextvars.
    posting.settings_var.set(settings)

    # Few common background forever-running infrastructural tasks (irregular root tasks).
    tasks.append(
        aiotasks.create_task(name="stop-flag checker",
                             coro=_stop_flag_checker(signal_flag=signal_flag,
                                                     stop_flag=stop_flag)))
    tasks.append(
        aiotasks.create_task(name="ultimate termination",
                             coro=_ultimate_termination(settings=settings,
                                                        stop_flag=stop_flag)))
    tasks.append(
        aiotasks.create_task(
            name="startup/cleanup activities",
            coro=_startup_cleanup_activities(
                root_tasks=tasks,  # used as a "live" view, populated later.
                ready_flag=ready_flag,
                started_flag=started_flag,
                registry=registry,
                settings=settings,
                vault=vault)))  # to purge & finalize the caches in the end.

    # Kill all the daemons gracefully when the operator exits (so that they are not "hung").
    tasks.append(
        aiotasks.create_guarded_task(
            name="daemon killer",
            flag=started_flag,
            logger=logger,
            coro=daemons.daemon_killer(settings=settings, memories=memories)))

    # Keeping the credentials fresh and valid via the authentication handlers on demand.
    tasks.append(
        aiotasks.create_guarded_task(name="credentials retriever",
                                     flag=started_flag,
                                     logger=logger,
                                     coro=activities.authenticator(
                                         registry=registry,
                                         settings=settings,
                                         vault=vault)))

    # K8s-event posting. Events are queued in-memory and posted in the background.
    # NB: currently, it is a global task, but can be made per-resource or per-object.
    tasks.append(
        aiotasks.create_guarded_task(
            name="poster of events",
            flag=started_flag,
            logger=logger,
            coro=posting.poster(event_queue=event_queue)))

    # Liveness probing -- so that Kubernetes would know that the operator is alive.
    if liveness_endpoint:
        tasks.append(
            aiotasks.create_guarded_task(name="health reporter",
                                         flag=started_flag,
                                         logger=logger,
                                         coro=probing.health_reporter(
                                             registry=registry,
                                             settings=settings,
                                             endpoint=liveness_endpoint)))

    # Monitor the peers, unless explicitly disabled.
    if await peering.detect_presence(namespace=namespace, settings=settings):
        identity = peering.detect_own_id(manual=False)
        tasks.append(
            aiotasks.create_guarded_task(name="peering keepalive",
                                         flag=started_flag,
                                         logger=logger,
                                         coro=peering.keepalive(
                                             namespace=namespace,
                                             settings=settings,
                                             identity=identity)))
        tasks.append(
            aiotasks.create_guarded_task(
                name="watcher of peering",
                flag=started_flag,
                logger=logger,
                coro=queueing.watcher(
                    namespace=namespace,
                    settings=settings,
                    resource=peering.guess_resource(namespace=namespace),
                    processor=functools.partial(peering.process_peering_event,
                                                namespace=namespace,
                                                settings=settings,
                                                identity=identity,
                                                freeze_toggle=freeze_toggle))))

    # Resource event handling, only once for every known resource (de-duplicated).
    for resource in registry.resources:
        tasks.append(
            aiotasks.create_guarded_task(
                name=f"watcher of {resource.name}",
                flag=started_flag,
                logger=logger,
                coro=queueing.watcher(namespace=namespace,
                                      settings=settings,
                                      resource=resource,
                                      freeze_checker=freeze_checker,
                                      processor=functools.partial(
                                          processing.process_resource_event,
                                          lifecycle=lifecycle,
                                          registry=registry,
                                          settings=settings,
                                          memories=memories,
                                          resource=resource,
                                          event_queue=event_queue))))

    # On Ctrl+C or pod termination, cancel all tasks gracefully.
    if threading.current_thread() is threading.main_thread():
        # Handle NotImplementedError when run on Windows, since asyncio only supports Unix signals
        try:
            loop.add_signal_handler(signal.SIGINT, signal_flag.set_result,
                                    signal.SIGINT)
            loop.add_signal_handler(signal.SIGTERM, signal_flag.set_result,
                                    signal.SIGTERM)
        except NotImplementedError:
            logger.warning(
                "OS signals are ignored: can't add signal handler in Windows.")

    else:
        logger.warning(
            "OS signals are ignored: running not in the main thread.")

    return tasks
Example no. 15
async def test_reraise_escalates_errors():
    task = create_task(fail("boo!"))
    await asyncio.wait([task], timeout=0.01)  # let it start & react
    with pytest.raises(Error):
        await reraise([task])